from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    r"""Constructs a BLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size, default_to_square=True)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale an image by `scale`, e.g. `1 / 255` to map pixel values into [0, 1]."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, do_convert_rgb: bool = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        """Preprocess an image or batch of images into model-ready `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=True)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
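

# Usage sketch (illustrative, not part of the original module). It assumes the
# class above is the `BlipImageProcessor` shipped with transformers; the dummy
# image below is only for shape-checking.
if __name__ == "__main__":
    from PIL import Image

    processor = BlipImageProcessor()
    dummy = Image.new("RGB", (640, 480))
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 384, 384)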
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
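

# A minimal sketch of the same lazy-import idea using module-level __getattr__
# (PEP 562) instead of transformers' internal _LazyModule. The package and
# module names are hypothetical; this only illustrates the deferred import.
#
#     # lazy_pkg/__init__.py
#     import importlib
#
#     _import_structure = {"heavy_module": ["HeavyClass"]}
#
#     def __getattr__(name):
#         for module_name, symbols in _import_structure.items():
#             if name in symbols:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")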
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
'self_attn.rotary_emb': 'encoder.embed_positions',
'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted key one attribute at a time to reach the target parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowerCAmelCase = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
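

# How `set_recursively` resolves a dotted key, shown on a toy module
# (illustrative only; not part of the conversion script):
#
#     import torch.nn as nn
#
#     toy = nn.Sequential(nn.Linear(2, 2))
#     pointer = toy
#     for attribute in "0.weight".split("."):
#         pointer = getattr(pointer, attribute)
#     assert pointer.shape == (2, 2)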
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch

logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using any model with a masked-LM head."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token in the text(s) given as inputs."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
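

# Usage sketch (illustrative; downloads a checkpoint on first run, and the
# model name is just an example):
#
#     from transformers import pipeline
#
#     fill_mask = pipeline("fill-mask", model="distilbert-base-uncased")
#     for pred in fill_mask("Paris is the [MASK] of France.", top_k=3):
#         print(pred["token_str"], round(pred["score"], 3))
#
# Passing `targets=["capital", "centre"]` restricts scoring to those tokens,
# which is what `get_target_ids` above implements.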
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Find the longest common substring of text1 and text2 via dynamic programming:
    dp[i][j] is the length of the common suffix ending at text1[i - 1] and
    text2[j - 1].
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
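

# Worked example (illustrative): for "abcdxyz" and "xyzabcd" the DP table peaks
# at dp[4][7] == 4, so the function returns "abcd":
#
#     assert longest_common_substring("abcdxyz", "xyzabcd") == "abcd"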
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model

    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last

        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="epsilon",
            num_train_timesteps=1000,
        )

        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        image_encoder = self.dummy_image_encoder

        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]

        output = pipe(**pipeline_inputs)
        image = output.images

        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]

        image_from_tuple = pipe(
            **tuple_pipeline_inputs,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (2, 64, 64, 3)

        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1

        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler()
        )

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)

        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents
        ).images

        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds

        img_out_2 = pipe(
            **pipeline_inputs,
            decoder_latents=decoder_latents,
            super_res_latents=super_res_latents,
            image_embeddings=image_embeddings,
        ).images

        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff
        )

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
        )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]

        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes,
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs
            )

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)

        assert_mean_pixel_difference(image, expected_image, 15)
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """
    Return the smallest cuboid size M such that the number of cuboids, no larger
    than M x M x M, whose shortest surface path between opposite corners has
    integer length, exceeds `limit`.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int

    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end times and duration (in minutes) from a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
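

# Example invocation (hypothetical run id and output, shown for orientation;
# the script filename is an assumption):
#
#     python get_github_job_time.py --workflow_run_id 1234567890
#
#     run_tests_torch_gpu: 127
#     run_tests_tf_gpu: 112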
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
"""simple docstring"""
def binomial_coefficient(n, k):
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count):
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    node_count = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
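
# A quick worked sanity check of the functions above (an illustrative sketch,
# not part of the original script): with 3 nodes, C(6, 3) = 20, so the Catalan
# number is 20 // 4 = 5 binary search trees, and 5 * 3! = 30 binary trees.
assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5
assert factorial(3) == 6
assert binary_tree_count(3) == 30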
| 223
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase = logging.getLogger(__name__)
_lowerCAmelCase = '''Hello world! cécé herlolip'''
_lowerCAmelCase = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(bertabs_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    config = BertAbsConfig(
        temp_dir=".", finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder="bert", max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
    checkpoints = torch.load(bertabs_checkpoint_path, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = None  # token type ids
    clss = None
    mask_src = None
    mask_tgt = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, segs, tgt, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, segs, mask_src, mask_tgt)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCAmelCase = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
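
# A minimal reload sketch (an illustration, not part of the original script):
# since torch.save above stored only the state dict, reloading means
# instantiating the architecture first and then loading the weights into it.
def load_converted_model(config):
    model = BertAbsSummarizer(config, torch.device("cpu"))
    model.load_state_dict(
        torch.load("./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
    )
    model.eval()
    return model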
| 565
| 0
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    '''simple docstring'''
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
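
# Usage sketch for the helper above (illustration only): scalars are duplicated
# into a pair, while iterables pass through unchanged.
assert to_atuple(224) == (224, 224)
assert to_atuple((224, 196)) == (224, 196)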
@require_flax
class _A :
"""simple docstring"""
def __snake_case ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int]):
pass
def __snake_case ( self : Any):
pass
def __snake_case ( self : int):
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f'''Difference between torch and flax is {diff} (>= {tol}).''')
def __snake_case ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : str=None , **__UpperCAmelCase : Dict):
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = FlaxVisionTextDualEncoderModel(__UpperCAmelCase)
a : Optional[Any] = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim))
def __snake_case ( self : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : Union[str, Any]):
a , a : Optional[int] = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase)
a : str = {"vision_model": vision_model, "text_model": text_model}
a : Tuple = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase)
a : Union[str, Any] = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase)
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim))
def __snake_case ( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=None , **__UpperCAmelCase : List[Any]):
a , a : List[str] = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase)
a : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
a : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase)
a : Dict = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a : List[Any] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase)
a : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase)
a : int = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a : Dict = after_output[0]
a : Optional[int] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__UpperCAmelCase , 1e-3)
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]=None , **__UpperCAmelCase : str):
a , a : Union[str, Any] = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = {"vision_model": vision_model, "text_model": text_model}
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase)
a : int = model(
input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase)
a : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(__UpperCAmelCase) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a : int = to_atuple(vision_model.config.image_size)
a : Optional[Any] = to_atuple(vision_model.config.patch_size)
a : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a : Any = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
a : Dict = output.text_model_output.attentions
self.assertEqual(len(__UpperCAmelCase) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __snake_case ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict):
pt_model.to(__UpperCAmelCase)
pt_model.eval()
# prepare inputs
a : List[str] = inputs_dict
a : Tuple = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
with torch.no_grad():
a : List[str] = pt_model(**__UpperCAmelCase).to_tuple()
a : int = fx_model(**__UpperCAmelCase).to_tuple()
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]):
self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4e-2)
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase)
a : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase)
a : str = fx_model_loaded(**__UpperCAmelCase).to_tuple()
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase) , "Output lengths differ between Flax and PyTorch")
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]):
self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4e-2)
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase)
a : Optional[int] = VisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase)
pt_model_loaded.to(__UpperCAmelCase)
pt_model_loaded.eval()
with torch.no_grad():
a : Dict = pt_model_loaded(**__UpperCAmelCase).to_tuple()
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase) , "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]):
self.assert_almost_equals(__UpperCAmelCase , pt_output_loaded.numpy() , 4e-2)
def __snake_case ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]):
a : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = VisionTextDualEncoderModel(__UpperCAmelCase)
a : Any = FlaxVisionTextDualEncoderModel(__UpperCAmelCase)
a : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase)
a : Optional[Any] = fx_state
self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple):
a : List[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase)
a : int = VisionTextDualEncoderModel(__UpperCAmelCase)
a : List[Any] = FlaxVisionTextDualEncoderModel(__UpperCAmelCase)
a : Optional[int] = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params)
self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Dict):
a : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCAmelCase)
def __snake_case ( self : str):
a : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCAmelCase)
def __snake_case ( self : Optional[int]):
a : Tuple = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCAmelCase)
def __snake_case ( self : Optional[Any]):
a : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCAmelCase)
@is_pt_flax_cross_test
def __snake_case ( self : Optional[int]):
a : List[str] = self.prepare_config_and_inputs()
a : Any = config_inputs_dict.pop("vision_config")
a : Optional[Any] = config_inputs_dict.pop("text_config")
a : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
self.check_equivalence_flax_to_pt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
@slow
def __snake_case ( self : int):
a , a : str = self.get_pretrained_model_and_inputs()
a : str = model_a(**__UpperCAmelCase)
a : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCAmelCase)
a : Dict = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase)
a : Optional[Any] = model_a(**__UpperCAmelCase)
a : Any = after_outputs[0]
a : List[str] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__UpperCAmelCase , 1e-5)
@require_flax
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[str]):
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCAmelCase , text_from_pt=__UpperCAmelCase , )
a : List[Any] = 13
a : str = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
a : Optional[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
a : str = random_attention_mask([batch_size, 4])
a : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Dict):
a : int = FlaxViTModel(__UpperCAmelCase)
a : Optional[Any] = FlaxBertModel(__UpperCAmelCase)
return vision_model, text_model
def __snake_case ( self : Union[str, Any]):
a : str = FlaxViTModelTester(self)
a : Any = FlaxBertModelTester(self)
a : List[str] = vit_model_tester.prepare_config_and_inputs()
a : str = bert_model_tester.prepare_config_and_inputs()
a , a : Dict = vision_config_and_inputs
a , a , a , a : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Tuple):
a : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCAmelCase , text_from_pt=__UpperCAmelCase , )
a : List[str] = 13
a : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
])
a : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size)
a : List[str] = random_attention_mask([batch_size, 4])
a : str = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]):
a : int = FlaxCLIPVisionModel(__UpperCAmelCase)
a : List[str] = FlaxBertModel(__UpperCAmelCase)
return vision_model, text_model
def __snake_case ( self : Union[str, Any]):
a : Tuple = FlaxCLIPVisionModelTester(self)
a : int = FlaxBertModelTester(self)
a : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
a : List[str] = bert_model_tester.prepare_config_and_inputs()
a , a : List[str] = vision_config_and_inputs
a , a , a , a : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
@slow
def __snake_case ( self : Any):
a : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0)
a : int = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
a : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
a : List[Any] = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="np")
a : int = model(**__UpperCAmelCase)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : List[Any] = np.array([[1.2_284_727, 0.3_104_122]])
self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCAmelCase , atol=1e-3))
| 135
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = """poolformer"""
def __init__( self : Union[str, Any] , __UpperCAmelCase : str=3 , __UpperCAmelCase : Tuple=16 , __UpperCAmelCase : Union[str, Any]=16 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Optional[int]=4.0 , __UpperCAmelCase : Tuple=[2, 2, 6, 2] , __UpperCAmelCase : Union[str, Any]=[64, 128, 320, 512] , __UpperCAmelCase : Any=[7, 3, 3, 3] , __UpperCAmelCase : Dict=[4, 2, 2, 2] , __UpperCAmelCase : List[str]=[2, 1, 1, 1] , __UpperCAmelCase : Tuple=4 , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : int=1e-5 , __UpperCAmelCase : Dict=0.02 , **__UpperCAmelCase : Any , ):
a : str = num_channels
a : Dict = patch_size
a : List[Any] = stride
a : Tuple = padding
a : str = pool_size
a : List[str] = hidden_sizes
a : List[str] = mlp_ratio
a : Optional[int] = depths
a : str = patch_sizes
a : Optional[Any] = strides
a : List[Any] = num_encoder_blocks
a : int = drop_path_rate
a : Any = hidden_act
a : Optional[Any] = use_layer_scale
a : str = layer_scale_init_value
a : int = initializer_range
super().__init__(**__UpperCAmelCase)
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : List[Any] = version.parse("""1.11""" )
@property
def __snake_case ( self : List[Any]):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def __snake_case ( self : Union[str, Any]):
return 2e-3
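
# Export sketch (an illustration, not part of this config class): the
# {index: axis_name} mapping defined above is exactly what marks dimensions as
# dynamic in an ONNX export. `model` is a placeholder for a loaded PoolFormer.
def export_to_onnx(model, output_path="poolformer.onnx"):
    import torch

    dummy_input = torch.randn(1, 3, 224, 224)  # batch, channels, height, width
    torch.onnx.export(
        model,
        dummy_input,
        output_path,
        input_names=["pixel_values"],
        output_names=["last_hidden_state"],
        dynamic_axes={"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}},
    )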
| 135
| 1
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size['longest_edge']
def __call__( self : Tuple, UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : List[str]=None, UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Optional[Union[str, TensorType]] = None, **UpperCamelCase__ : Dict, ) -> BatchEncoding:
_A = self.image_processor(
UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__, )
        # pop arguments that are not used in the forward pass but are used nevertheless
_A = encoding_image_processor['original_sizes']
if hasattr(UpperCamelCase__, 'numpy' ): # Checks if Torch or TF tensor
_A = original_sizes.numpy()
_A , _A , _A = self._check_and_preprocess_points(
input_points=UpperCamelCase__, input_labels=UpperCamelCase__, input_boxes=UpperCamelCase__, )
_A = self._normalize_and_convert(
UpperCamelCase__, UpperCamelCase__, input_points=UpperCamelCase__, input_labels=UpperCamelCase__, input_boxes=UpperCamelCase__, return_tensors=UpperCamelCase__, )
return encoding_image_processor
def __UpperCAmelCase ( self : str, UpperCamelCase__ : int, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : Any="pt", ) -> str:
if input_points is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
_A = [
self._normalize_coordinates(self.target_size, UpperCamelCase__, original_sizes[0] ) for point in input_points
]
else:
_A = [
self._normalize_coordinates(self.target_size, UpperCamelCase__, UpperCamelCase__ )
for point, original_size in zip(UpperCamelCase__, UpperCamelCase__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_A , _A = self._pad_points_and_labels(UpperCamelCase__, UpperCamelCase__ )
_A = np.array(UpperCamelCase__ )
if input_labels is not None:
_A = np.array(UpperCamelCase__ )
if input_boxes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
_A = [
self._normalize_coordinates(self.target_size, UpperCamelCase__, original_sizes[0], is_bounding_box=UpperCamelCase__ )
for box in input_boxes
]
else:
_A = [
self._normalize_coordinates(self.target_size, UpperCamelCase__, UpperCamelCase__, is_bounding_box=UpperCamelCase__ )
for box, original_size in zip(UpperCamelCase__, UpperCamelCase__ )
]
_A = np.array(UpperCamelCase__ )
if input_boxes is not None:
if return_tensors == "pt":
_A = torch.from_numpy(UpperCamelCase__ )
# boxes batch size of 1 by default
_A = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_A = tf.convert_to_tensor(UpperCamelCase__ )
# boxes batch size of 1 by default
_A = tf.expand_dims(UpperCamelCase__, 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_A = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
_A = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_A = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
_A = tf.expand_dims(UpperCamelCase__, 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_A = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
_A = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_A = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
_A = tf.expand_dims(UpperCamelCase__, 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : List[str], UpperCamelCase__ : int ) -> List[Any]:
_A = max([point.shape[0] for point in input_points] )
_A = []
for i, point in enumerate(UpperCamelCase__ ):
if point.shape[0] != expected_nb_points:
_A = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value], axis=0 )
_A = np.append(input_labels[i], [self.point_pad_value] )
processed_input_points.append(UpperCamelCase__ )
_A = processed_input_points
return input_points, input_labels
def __UpperCAmelCase ( self : str, UpperCamelCase__ : int, UpperCamelCase__ : np.ndarray, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any=False ) -> np.ndarray:
_A , _A = original_size
_A , _A = self.image_processor._get_preprocess_shape(UpperCamelCase__, longest_edge=UpperCamelCase__ )
_A = deepcopy(UpperCamelCase__ ).astype(UpperCamelCase__ )
if is_bounding_box:
_A = coords.reshape(-1, 2, 2 )
_A = coords[..., 0] * (new_w / old_w)
_A = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_A = coords.reshape(-1, 4 )
return coords
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Optional[int]=None, ) -> str:
if input_points is not None:
if hasattr(UpperCamelCase__, 'numpy' ): # Checks for TF or Torch tensor
_A = input_points.numpy().tolist()
if not isinstance(UpperCamelCase__, UpperCamelCase__ ) or not isinstance(input_points[0], UpperCamelCase__ ):
raise ValueError('Input points must be a list of list of floating points.' )
_A = [np.array(UpperCamelCase__ ) for input_point in input_points]
else:
_A = None
if input_labels is not None:
if hasattr(UpperCamelCase__, 'numpy' ):
_A = input_labels.numpy().tolist()
if not isinstance(UpperCamelCase__, UpperCamelCase__ ) or not isinstance(input_labels[0], UpperCamelCase__ ):
raise ValueError('Input labels must be a list of list integers.' )
_A = [np.array(UpperCamelCase__ ) for label in input_labels]
else:
_A = None
if input_boxes is not None:
if hasattr(UpperCamelCase__, 'numpy' ):
_A = input_boxes.numpy().tolist()
if (
not isinstance(UpperCamelCase__, UpperCamelCase__ )
or not isinstance(input_boxes[0], UpperCamelCase__ )
or not isinstance(input_boxes[0][0], UpperCamelCase__ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
            _A = [np.array(box).astype(np.float32) for box in input_boxes]
else:
_A = None
return input_points, input_labels, input_boxes
@property
def __UpperCAmelCase ( self : int ) -> Optional[Any]:
_A = self.image_processor.model_input_names
return list(dict.fromkeys(UpperCamelCase__ ) )
def __UpperCAmelCase ( self : Optional[Any], *UpperCamelCase__ : int, **UpperCamelCase__ : Any ) -> Dict:
return self.image_processor.post_process_masks(*UpperCamelCase__, **UpperCamelCase__ )
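
# Numeric sketch of the rescaling performed by _normalize_coordinates above
# (an illustration, not part of the processor): coordinates scale by
# new_size / original_size per axis, e.g. 1000x1000 -> 1024x1024, SAM's longest edge.
def _rescale_coords_example():
    old_h, old_w = 1000, 1000
    new_h, new_w = 1024, 1024
    coords = np.array([[500.0, 250.0]])
    coords[..., 0] = coords[..., 0] * (new_w / old_w)
    coords[..., 1] = coords[..., 1] * (new_h / old_h)
    assert np.allclose(coords, [[512.0, 256.0]])
    return coords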
| 107
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """simple docstring"""
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
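
# Sketch of the default mask rule used above (illustration only, not part of
# the test): a position is 1 when its token differs from the pad id and 0 when
# it is padding. With pad_token_id assumed to be 0 here:
def _attention_mask_example():
    ids = tf.constant([[5, 7, 9, 0, 0]])
    return tf.cast(tf.math.not_equal(ids, 0), tf.int8)  # -> [[1, 1, 1, 0, 0]]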
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
    expected_text = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        """simple docstring"""
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        """simple docstring"""
        self._assert_generated_batch_equal_expected()
| 423
| 0
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
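
# Worked example of the DP above (an illustrative sketch, not part of the
# original): for 5 pence there are 4 ways, {1+1+1+1+1, 1+1+1+2, 1+2+2, 5}.
assert solution(5) == 4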
| 132
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=24 , _lowerCamelCase=2 , _lowerCamelCase=6 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=1000 , ):
lowerCamelCase__ =parent
lowerCamelCase__ =batch_size
lowerCamelCase__ =seq_length
lowerCamelCase__ =is_training
lowerCamelCase__ =use_input_mask
lowerCamelCase__ =use_token_type_ids
lowerCamelCase__ =use_labels
lowerCamelCase__ =vocab_size
lowerCamelCase__ =hidden_size
lowerCamelCase__ =num_hidden_layers
lowerCamelCase__ =num_attention_heads
lowerCamelCase__ =intermediate_size
lowerCamelCase__ =hidden_act
lowerCamelCase__ =hidden_dropout_prob
lowerCamelCase__ =attention_probs_dropout_prob
lowerCamelCase__ =max_position_embeddings
lowerCamelCase__ =type_vocab_size
lowerCamelCase__ =type_sequence_label_size
lowerCamelCase__ =initializer_range
lowerCamelCase__ =num_labels
lowerCamelCase__ =scope
lowerCamelCase__ =range_bbox
def _a ( self ):
lowerCamelCase__ =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ =ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ =bbox[i, j, 3]
lowerCamelCase__ =bbox[i, j, 1]
lowerCamelCase__ =t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ =bbox[i, j, 2]
lowerCamelCase__ =bbox[i, j, 0]
lowerCamelCase__ =t
lowerCamelCase__ =None
if self.use_input_mask:
lowerCamelCase__ =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCamelCase__ =None
if self.use_token_type_ids:
lowerCamelCase__ =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ =None
lowerCamelCase__ =None
if self.use_labels:
lowerCamelCase__ =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ =self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ =LiltModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCamelCase__ =model(_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase )
lowerCamelCase__ =model(_lowerCamelCase , bbox=_lowerCamelCase , token_type_ids=_lowerCamelCase )
lowerCamelCase__ =model(_lowerCamelCase , bbox=_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ =self.num_labels
lowerCamelCase__ =LiltForTokenClassification(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCamelCase__ =model(
_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ =LiltForQuestionAnswering(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCamelCase__ =model(
_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
A__ : Optional[int] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ : int = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ : Any = False
A__ : int = False
def _a ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
return True
def _a ( self ):
lowerCamelCase__ =LiltModelTester(self )
lowerCamelCase__ =ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37 )
def _a ( self ):
self.config_tester.run_common_tests()
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ =type
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase )
def _a ( self ):
lowerCamelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase )
@slow
def _a ( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ =LiltModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@require_torch
@slow
class __UpperCAmelCase ( unittest.TestCase ):
def _a ( self ):
lowerCamelCase__ =LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(_lowerCamelCase )
lowerCamelCase__ =torch.tensor([[1, 2]] , device=_lowerCamelCase )
lowerCamelCase__ =torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCamelCase__ =model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase )
lowerCamelCase__ =torch.Size([1, 2, 768] )
lowerCamelCase__ =torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=_lowerCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _lowerCamelCase , atol=1E-3 ) )
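
# Vectorized variant of the bbox "legalization" loop in LiltModelTester above
# (an illustrative sketch, not part of the test): ordering each box's corners
# with elementwise min/max replaces the nested Python loops in one shot.
def legalize_bboxes(bbox):
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)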
| 132
| 1
|
"""simple docstring"""
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
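
# Inline illustration of the same technique with made-up data (a sketch, not
# part of the original): sum the numbers, then keep the first ten digits of
# the total via string slicing.
assert str(sum([91234567890, 81234567890, 71234567890]))[:10] == "2437037036"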
| 473
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a_ : str = self.feature_extractor
a_ : Dict = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
a_ : Any = kwargs.pop("raw_speech" )
else:
a_ : Tuple = kwargs.pop("audio" , _SCREAMING_SNAKE_CASE )
a_ : int = kwargs.pop("sampling_rate" , _SCREAMING_SNAKE_CASE )
a_ : Dict = kwargs.pop("text" , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
a_ : List[Any] = args[0]
a_ : List[Any] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
a_ : int = self.feature_extractor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
a_ : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs["labels"] = encodings["input_ids"]
return inputs
def A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def A ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[str]:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def A ( self ) -> List[Any]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
a_ : Tuple = True
a_ : Tuple = self.tokenizer
yield
a_ : int = self.feature_extractor
a_ : str = False
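
# Usage sketch for the path the deprecation warning above recommends (the
# argument names `processor`, `waveform`, `transcript` are placeholders): pass
# audio and text in a single call; the tokenized text lands under "labels",
# exactly as __call__ above builds it.
def _encode_pair_example(processor, waveform, transcript):
    inputs = processor(audio=waveform, sampling_rate=16000, text=transcript)
    return inputs["input_features"], inputs["labels"]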
| 473
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split('.')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(F'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device('meta') and device not in ["meta", torch.device('meta')] and value is None:
        raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, 'Params4bit') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)
    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('cpu')
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version('bitsandbytes')) > version.parse(
                        '0.37.2')
                    if not is_8bit_serializable:
                        raise ValueError(
                            'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
                            'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.')
            else:
                new_value = torch.tensor(value, device='cpu')
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, 'SCB', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '.'.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace all `nn.Linear` (and `Conv1D`) layers of `model` with 8-bit or 4-bit `bitsandbytes` layers,
    keeping `lm_head` (and any user-specified modules) in full precision for numerical stability.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias kept for backward compatibility."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias kept for backward compatibility."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Return the names of the modules that should stay in full precision: tied weights and the output
    head (e.g. `lm_head`), since quantizing them usually hurts generation quality.
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
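# The helpers above are easiest to understand end to end. A minimal sketch of how they fit
# together when loading a model in 8-bit (the checkpoint name and config values below are
# illustrative, not taken from this file):
#
#     import torch
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config)
#
# Under the hood, `from_pretrained` calls get_keys_to_not_convert() to protect tied weights and
# the lm_head, replace_with_bnb_linear() to swap nn.Linear for bnb.nn.Linear8bitLt, and
# set_module_quantized_tensor_to_device() to move each quantized weight onto the target device.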
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
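# For reference, the tokenizer under test round-trips like any other HF tokenizer.
# A minimal sketch (requires network access to fetch the RUCAIBox/mvp checkpoint used above):
#
#     tok = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
#     ids = tok("lower newer").input_ids
#     assert tok.decode(ids, skip_special_tokens=True) == "lower newer"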
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
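# With the lazy module in place, importing from this package stays cheap until a symbol is
# actually touched. A small illustration (assumes torch is installed):
#
#     from transformers.models.reformer import ReformerConfig   # loads only the config module
#     from transformers.models.reformer import ReformerModel    # this import pulls in modeling_reformer (and torch)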
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(self, activation_dropout=0.1, activation_function="gelu", vocab_size=30522, hidden_size=1024, encoder_ffn_dim=4096, num_encoder_layers=12, num_encoder_attention_heads=16, decoder_ffn_dim=4096, num_decoder_layers=12, num_decoder_attention_heads=16, attention_dropout=0.1, dropout=0.1, max_position_embeddings=512, init_std=0.02, is_encoder_decoder=True, add_cross_attention=True, decoder_start_token_id=0, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, eps=0.0, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
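# Minimal usage sketch (the argument values are illustrative; defaults are shown in the
# signature above, and the derived property combines both stacks):
#
#     config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#     assert config.num_hidden_layers == 12  # encoder + decoder layers combined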
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is a well-formed dotted-quad IPv4 address."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
__a : int = StableDiffusionXLImgaImgPipeline
__a : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
__a : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__a : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__a : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__a : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case_ ( self ):
torch.manual_seed(0 )
__lowerCamelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
__lowerCamelCase : Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
__lowerCamelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCamelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
__lowerCamelCase : List[Any] = CLIPTextModel(__a )
__lowerCamelCase : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__a )
__lowerCamelCase : List[Any] = CLIPTextModelWithProjection(__a )
__lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=__a )
__lowerCamelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def snake_case_ ( self , __a , __a=0 ):
__lowerCamelCase : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__lowerCamelCase : Any = image / 2 + 0.5
if str(__a ).startswith('mps' ):
__lowerCamelCase : Dict = torch.manual_seed(__a )
else:
__lowerCamelCase : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
__lowerCamelCase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def snake_case_ ( self ):
__lowerCamelCase : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Tuple = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
__lowerCamelCase : Dict = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__lowerCamelCase : Any = self.get_dummy_inputs(__a )
__lowerCamelCase : Tuple = sd_pipe(**__a ).images
__lowerCamelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : List[str] = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def snake_case_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
__lowerCamelCase : List[Any] = sd_pipe.to(__a )
__lowerCamelCase : Optional[int] = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
# forward without prompt embeds
__lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
__lowerCamelCase : Dict = 3 * ['this is a negative prompt']
__lowerCamelCase : Optional[int] = negative_prompt
__lowerCamelCase : List[str] = 3 * [inputs['prompt']]
__lowerCamelCase : Any = sd_pipe(**__a )
__lowerCamelCase : Any = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__lowerCamelCase : Dict = self.get_dummy_inputs(__a )
__lowerCamelCase : str = 3 * ['this is a negative prompt']
__lowerCamelCase : List[str] = 3 * [inputs.pop('prompt' )]
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : List[str] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
__lowerCamelCase : Union[str, Any] = sd_pipe(
**__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
__lowerCamelCase : Any = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __lowercase( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , __a , __a="cpu" , __a=torch.floataa , __a=0 ):
__lowerCamelCase : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
__lowerCamelCase : List[str] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
__lowerCamelCase : Tuple = torch.from_numpy(__a ).to(device=__a , dtype=__a )
__lowerCamelCase : Optional[int] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case_ ( self ):
__lowerCamelCase : Optional[Any] = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowerCamelCase : Dict = self.get_inputs(__a )
__lowerCamelCase : Optional[Any] = pipe(**__a ).images
__lowerCamelCase : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase : str = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
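# For reference, the pipeline exercised by the fast tests is used like this outside the test
# harness. A minimal sketch: the checkpoint id is the public SDXL base model, and `init_image`
# stands in for any PIL image you supply:
#
#     from diffusers import StableDiffusionXLImg2ImgPipeline
#
#     pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
#     out = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75, num_inference_steps=30)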
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : List[Any] = ['image_processor', 'tokenizer']
__a : List[Any] = 'BlipImageProcessor'
__a : str = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , __a , __a ):
__lowerCamelCase : str = False
super().__init__(__a , __a )
__lowerCamelCase : Union[str, Any] = self.image_processor
def __call__( self , __a = None , __a = None , __a = True , __a = False , __a = None , __a = None , __a = 0 , __a = None , __a = None , __a = False , __a = False , __a = False , __a = False , __a = False , __a = True , __a = None , **__a , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__lowerCamelCase : List[Any] = self.tokenizer
__lowerCamelCase : List[str] = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
return text_encoding
# add pixel_values
__lowerCamelCase : Any = self.image_processor(__a , return_tensors=__a )
if text is not None:
__lowerCamelCase : Tuple = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_token_type_ids=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
else:
__lowerCamelCase : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(__a )
return encoding_image_processor
def snake_case_ ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case_ ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@property
def snake_case_ ( self ):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
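# Sketch of typical usage (model id and image source are illustrative, not part of this file):
#
#     from PIL import Image
#     import requests
#     from transformers import BlipProcessor
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     image = Image.open(requests.get("https://example.com/cat.png", stream=True).raw)
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # inputs holds pixel_values from the image processor plus input_ids/attention_mask from the tokenizer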
import gc
import unittest

from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
    is_pipeline_test,
    is_torch_available,
    nested_simplify,
    require_tf,
    require_torch,
    require_torch_gpu,
    slow,
)

from .test_pipelines_common import ANY


@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
class EditDistance:
    """
    Solves the edit (Levenshtein) distance problem with dynamic programming,
    both top-down with memoization and bottom-up with tabulation.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert_char = self.__min_dist_top_down_dp(m, n - 1)
                delete_char = self.__min_dist_top_down_dp(m - 1, n)
                replace_char = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert_char, delete_char, replace_char)

        return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert_char = self.dp[i][j - 1]
                    delete_char = self.dp[i - 1][j]
                    replace_char = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert_char, delete_char, replace_char)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
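# Worked example: transforming "kitten" into "sitting" takes 3 edits
# (substitute k->s, substitute e->i, insert g), so both methods return 3:
#
#     solver = EditDistance()
#     assert solver.min_dist_top_down("kitten", "sitting") == 3
#     assert solver.min_dist_bottom_up("kitten", "sitting") == 3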
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Find the maximum of `nums[left:right + 1]` by divide and conquer.

    >>> find_max([1, 3, 5, 7, 9, 2, 4, 6, 8, 10], 0, 9)
    10
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
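# Example at the call site -- note that `left` and `right` are inclusive indices:
#
#     find_max([1, 9, 4, 7], 0, 3)   # returns 9
#     find_max([1, 9, 4, 7], 2, 3)   # returns 7 (searches only the right half)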
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
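# Usage sketch (checkpoint names come from the pretrained map above; requires the
# `sentencepiece` package and network access):
#
#     tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#     ids = tokenizer("Hello world").input_ids   # character-level pieces plus the trailing </s>
#     text = tokenizer.decode(ids, skip_special_tokens=True)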
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
UpperCamelCase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
UpperCamelCase__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ]
        )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')

        if 'past_key_values' in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
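A quick sanity-check sketch for the config above; it relies only on the defaults visible in the __init__ signature and on `attribute_map`, which aliases `hidden_size` to `d_model`:

config = WhisperConfig()
print(config.model_type)            # 'whisper'
print(config.d_model)               # 256
print(config.hidden_size)           # 256, resolved through attribute_map
print(config.max_source_positions)  # 1500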
| 254
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
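The deprecated-argument translation performed in __init__ above can be followed without TensorFlow installed; a small standalone sketch of the same renaming logic:

kwargs = {"no_inference": True, "tpu_name": "my-tpu"}  # hypothetical CLI kwargs
deprecated_args = ["no_inference", "no_cuda", "no_tpu"]
for deprecated_arg in deprecated_args:
    if deprecated_arg in kwargs:
        positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
        kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
print(kwargs)  # {'tpu_name': 'my-tpu', 'inference': False}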
| 254
| 1
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
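Outside the test harness, the round-trip the tests above exercise looks like this (a sketch; it downloads the real bigscience tokenizer):

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tokenizer.batch_encode_plus(["The quick brown fox</s>"])["input_ids"]
print(tokenizer.batch_decode(ids))  # ['The quick brown fox</s>']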
| 200
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={'help': 'Whether to log verbose messages or not.'} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name: Optional[str] = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    validation_split_name: Optional[str] = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    speech_file_column: Optional[str] = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])

        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )

            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )

            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class Wav2Vec2PreTrainer(Trainer):
    """
    Subclassed Trainer for Wav2Vec2-like pretraining. Trainer can decay gumbel softmax temperature during training.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = Wav2Vec2ForPreTraining(config)

    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)

    trainer = Wav2Vec2PreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
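The gumbel-temperature schedule applied in training_step above decays geometrically from the maximum toward the minimum; a standalone sketch using the script's default hyperparameters:

max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995
for step in (1, 100_000, 500_000):
    temp = max(max_gumbel_temp * gumbel_temp_decay**step, min_gumbel_temp)
    print(step, round(temp, 4))
# 1 -> 2.0, 100000 -> ~1.2131, 500000 -> 0.5 (floored at the minimum)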
| 659
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
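A quick check (a sketch, assuming torch) that the reverse_* helpers undo their correct_* counterparts, which is why the converter below can safely apply them to checkpoints saved in the corrected layout:

import torch

x = torch.arange(32.0).reshape(4, 8)  # (out_channel, in_channel), in_channel % 4 == 0
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)

v = torch.arange(8.0)  # 1D norm parameter, length divisible by 4
assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)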
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
if model_name == "upernet-swin-tiny":
__UpperCamelCase : str = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
__UpperCamelCase : Dict = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
__UpperCamelCase : int = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
__UpperCamelCase : List[str] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(snake_case__ )
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 399
|
'''simple docstring'''
def wave(txt: str) -> list:
    """Returns a so called 'wave' of a given string."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__('''doctest''').testmod()
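For illustration, each alphabetic position yields one variant with that character uppercased, while non-alphabetic positions are skipped:

print(wave("ab!c"))
# ['Ab!c', 'aB!c', 'ab!C']  -- '!' contributes no variant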
| 399
| 1
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
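A worked example: a 90-degree arc is a quarter of the circumference, so for radius 10 the function returns 2*pi*10/4 = 5*pi (about 15.708):

from math import isclose

assert isclose(arc_length(90, 10), 5 * pi)
assert isclose(arc_length(360, 1), 2 * pi)  # full circle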
| 181
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
        position_embedding_type="absolute", block_per_row=4, approx_mode="full",
        initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
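A minimal instantiation sketch, relying only on the defaults visible in the signature above:

config = MraConfig()
print(config.model_type)     # 'mra'
print(config.vocab_size)     # 50265
print(config.block_per_row)  # 4
custom = MraConfig(num_hidden_layers=6, approx_mode="sparse")  # overriding defaults; 'sparse' assumed valid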
| 181
| 1
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
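The early-exit decision in DeeBERT-style models compares the entropy of a highway classifier's logits to a threshold; a minimal sketch of that criterion, assuming the usual softmax entropy computed by the imported `entropy` helper:

import torch

def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    # entropy of the predictive distribution; low entropy = confident prediction
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)

logits = torch.tensor([[8.0, 0.1, 0.2]])  # confident prediction -> low entropy
if softmax_entropy(logits).item() < 0.5:  # hypothetical exit threshold
    print("exit early at this highway layer")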
| 60
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
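A small demonstration of the _dense_to_one_hot helper above, runnable with numpy alone:

labels = numpy.array([0, 2, 1])
print(_dense_to_one_hot(labels, num_classes=3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]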
| 60
| 1
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
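As a correctness check, the straight line f(x) = x from 0 to 1 has exact length sqrt(2), and the piecewise-linear approximation is exact for a line at any step count:

from math import isclose, sqrt

assert isclose(line_length(lambda x: x, 0, 1, steps=10), sqrt(2))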
| 101
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

_model_names = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {F"funnel-transformer/{name}": 512 for name in _model_names}
lowerCAmelCase__ = {F"funnel-transformer/{name}": {'do_lower_case': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
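Funnel reserves token type id 2 for the [CLS] position, which is what cls_token_type_id = 2 above encodes; a sketch of the resulting segment ids (downloads the real checkpoint):

tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
ids = tokenizer.convert_tokens_to_ids(["hello", "world"])
print(tokenizer.create_token_type_ids_from_sequences(ids))
# [2, 0, 0, 0] -> cls has type 2; the two tokens and the trailing sep have type 0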
| 321
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=self.is_encoder_decoder, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
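# Why the check above works: a full forward pass over [prefix + new tokens] must yield, at the
# new positions, the same hidden states as feeding only the new tokens together with the cached
# past_key_values of the prefix; comparing one random feature slice keeps the test cheap.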
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class lowercase ( unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Tuple = 99
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
SCREAMING_SNAKE_CASE : Optional[int] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.shape[0]
SCREAMING_SNAKE_CASE : List[str] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
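# Note on tolerances above: the XLA-compiled pass is compared with a looser atol (4e-2 vs 4e-3)
# because XLA fusion may reorder floating-point ops and accumulate slightly different rounding.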
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 308
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 308
| 1
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
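# Hedged usage sketch (the tiny in-memory datasets and the expected row order are illustrative
# of the default round-robin behaviour; with "first_exhausted", interleaving stops once the
# smallest dataset runs out of examples):
def _demo_interleave():
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11]})
    mixed = interleave_datasets([d1, d2])  # alternates d1, d2 until d2 is exhausted
    assert mixed["a"] == [0, 10, 1, 11]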
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
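# Hedged usage sketch of the function above (assumption: two tiny map-style datasets with the
# same column; with the default axis=0 the rows are simply appended):
def _demo_concatenate():
    d1 = Dataset.from_dict({"a": [0, 1]})
    d2 = Dataset.from_dict({"a": [2, 3]})
    assert concatenate_datasets([d1, d2])["a"] == [0, 1, 2, 3]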
| 22
|
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
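# A non-recursive alternative (a sketch, not part of the original module): collect the values
# iteratively and print them reversed, which sidesteps Python's recursion limit on long lists.
def print_reverse_iterative(head_node):
    values = []
    while head_node is not None:
        values.append(head_node.data)
        head_node = head_node.next
    for value in reversed(values):
        print(value)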
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 655
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]="cpu" , SCREAMING_SNAKE_CASE__ : int=torch.floataa , SCREAMING_SNAKE_CASE__ : Tuple=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : str ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
lowerCamelCase__ = ldmad_pipe.to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = rgb[0, -3:, -3:, -1].flatten()
lowerCamelCase__ = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
lowerCamelCase__ = np.array(
[0.53_80_54_65, 0.56_70_73_05, 0.5_48_65_15, 0.57_01_22_36, 0.5_81_45_11, 0.56_25_34_87, 0.54_84_30_14, 0.55_09_22_63, 0.6_45_97_06] )
lowerCamelCase__ = np.array(
[0.9_26_37_81, 0.6_67_86_72, 0.5_48_65_15, 0.92_20_21_45, 0.67_83_11_35, 0.56_25_34_87, 0.9_24_16_94, 0.7_55_14_78, 0.6_45_97_06] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class _a ( unittest.TestCase ):
def _UpperCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]="cpu" , SCREAMING_SNAKE_CASE__ : Dict=torch.floataa , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 ):
lowerCamelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = np.random.RandomState(SCREAMING_SNAKE_CASE__ ).standard_normal((1, 4, 64, 64) )
lowerCamelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self : Optional[Any] ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = 0.49_55_86
lowerCamelCase__ = 0.33_79_55_15
lowerCamelCase__ = 1_12.4_85_18
lowerCamelCase__ = 98.48_97_46
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _UpperCamelCase ( self : Any ):
lowerCamelCase__ = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(SCREAMING_SNAKE_CASE__ )
ldmad_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_inputs(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = ldmad_pipe(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ , lowerCamelCase__ = output.rgb, output.depth
lowerCamelCase__ = 0.4_19_41_27
lowerCamelCase__ = 0.35_37_55_86
lowerCamelCase__ = 0.5_63_85_02
lowerCamelCase__ = 0.34_68_61_03
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 659
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
FAIRSEQ_LANGUAGE_CODES = _snake_case  # bind the language-code list above to the name used below


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend([t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES}
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
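    # Sketch of the resulting sequence layouts (assuming legacy_behaviour=False, the default here):
    #   source:  [src_lang_code] tokens ... </s>
    #   target:  [tgt_lang_code] tokens ... </s>
    # In legacy mode the language code instead follows </s> at the end of the sequence.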
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 659
| 1
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
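# Worked check on a tiny case (a sketch; the reduced coin set (1, 2) is illustrative):
# 4 pence can be formed as 1+1+1+1, 1+1+2, and 2+2 -> 3 ways.
def _demo_ways(pence: int = 4, coins=(1, 2)) -> int:
    ways = [0] * (pence + 1)
    ways[0] = 1
    for coin in coins:
        for i in range(coin, pence + 1):
            ways[i] += ways[i - coin]
    return ways[pence]  # _demo_ways() == 3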
if __name__ == "__main__":
assert solution(200) == 7_3682
| 640
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 640
| 1
|
'''simple docstring'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the given string is a syntactically valid IP v4 address (octets 0-254)."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
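# A few hedged sanity checks mirroring the validator's rules above (note it caps octets at 254
# rather than the usual 255); the sample addresses are illustrative only.
def _demo_ip_checks():
    assert is_ip_v4_address_valid("192.168.0.23")
    assert not is_ip_v4_address_valid("192.256.15.8")  # 256 is above the 254 cap used here
    assert not is_ip_v4_address_valid("1.2.3")         # needs exactly four octets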
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 715
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add the three fractions x, y, z and return the reduced numerator and denominator."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
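

# Note (a sketch of the Project Euler 180 insight): the four branches above cover
# f_n(x, y, z) = x**n + y**n - z**n = 0 for n in {1, 2, -1, -2}; Fermat's Last
# Theorem rules out integer solutions for |n| >= 3, so these cases are exhaustive.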
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
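

# A minimal usage sketch (the checkpoint name is an assumption; any CLIP-style
# zero-shot checkpoint works):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["a photo of a cat", "a photo of a dog"])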
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11	Named Entities	These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}")
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}")

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively")

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}", )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
from __future__ import annotations
def is_9_pandigital(number: int) -> bool:
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
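

# Worked example (Project Euler 38): base_num = 9327 gives 9327 * 100002 = 932718654,
# the concatenation of 9327 and 2 * 9327 = 18654, which uses each digit 1-9 exactly once.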
if __name__ == "__main__":
print(f'{solution() = }')
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        '''simple docstring'''
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` """
                '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''')
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """
                f"""device: {str(jax.devices()[0])}.""")
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        '''simple docstring'''
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        '''simple docstring'''
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, '''__array__''') and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
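

# A minimal usage sketch (assumes a `datasets.Dataset` instance named `ds`):
# ds = ds.with_format("jax")  # rows are then produced through this formatter
# ds[0]                       # numeric fields come back as `jax.Array` values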
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
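

# A minimal usage sketch (checkpoint name taken from the pretrained map above):
# tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
# tok.build_inputs_with_special_tokens([5, 6])  # -> [cls_id, 5, 6, sep_id]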
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
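
# With the lazy module installed above, `from <this package> import TableTransformerModel`
# only triggers the heavy torch-backed import on first attribute access (a sketch of the intent).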
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """simple docstring"""
    if discount_rate < 0:
        raise ValueError("""Discount rate cannot be negative""")
    if not cash_flows:
        raise ValueError("""Cash flows list cannot be empty""")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows))
    return round(present_value, ndigits=2)
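

# Worked example (values are illustrative; the flow at i=0 is undiscounted):
# present_value(0.13, [10.0, 20.70, -293.0, 297.0])
#   = 10/1.13**0 + 20.70/1.13**1 - 293/1.13**2 + 297/1.13**3 ≈ 4.69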
if __name__ == "__main__":
import doctest
doctest.testmod()
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    '''simple docstring'''

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group", ):
        """simple docstring"""
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """simple docstring"""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """simple docstring"""
        # also go through quantization layer
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        dec = self.decoder(quant_post, quant if self.config.norm_type == """spatial""" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """simple docstring"""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
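

# A minimal usage sketch (shapes are illustrative, not from the original file):
# model = VQModel()                       # defaults: 3 -> 3 channels, one 64-channel block
# out = model(torch.randn(1, 3, 32, 32))  # forward() encodes, quantizes, then decodes
# out.sample.shape                        # expected: torch.Size([1, 3, 32, 32])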
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)

    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        """simple docstring"""
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
    def test_tokenizer_integration(self):
"""simple docstring"""
__snake_case = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `__snake_case` above holds the expected encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2", )
    def test_save_pretrained(self):
        """simple docstring"""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = """facebook/mbart-large-50-one-to-many-mmt"""
    src_text = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        """simple docstring"""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)

    def test_tokenizer_batch_encode_plus(self):
        """simple docstring"""
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        """simple docstring"""
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        """simple docstring"""
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        """simple docstring"""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
    def test_special_tokens_unaffected_by_save_load(self):
        """simple docstring"""
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        """simple docstring"""
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        """simple docstring"""
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        """simple docstring"""
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        """simple docstring"""
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")

        self.assertEqual(
            nested_simplify(inputs), {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            }, )
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent(line: str) -> str:
    """simple docstring"""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """simple docstring"""
    index = 0
    lines = code.split("""\n""")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["""\n""".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + """ """):
                current_block.append(lines[index])
                blocks.append("""\n""".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("""\n""".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("""\n""".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("""\n""".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """simple docstring"""

    def _inner(x):
        return key(x).lower().replace("""_""", """""")

    return _inner
def sort_objects(objects, key=None):
    """simple docstring"""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
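

# Worked example (a sketch): sort_objects(["zeta", "_beta", "CONST_B", "Alpha", "CONST_A"])
# -> ["CONST_A", "CONST_B", "Alpha", "_beta", "zeta"]: constants first, then classes,
# then functions, each group sorted case- and underscore-insensitively.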
def sort_objects_in_import(import_statement: str) -> str:
    """simple docstring"""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace("""\"""", """""") for part in imports.split(""",""")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("""\n""")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == """[""" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("""\"""", """""") for part in lines[1].split(""",""")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + """, """.join([f"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file: str, check_only: bool = True):
    """simple docstring"""
    with open(file, """r""") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="""_import_structure = {""", end_prompt="""if TYPE_CHECKING:""")

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("""\n""")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = """\n""".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, """w""") as f:
                f.write("""\n""".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, """__init__.py"""), check_only=check_only)
            if result:
                failures = [os.path.join(root, """__init__.py""")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
def mf_knapsack(i: int, wt: list, val: list, j: int) -> int:
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            f[i][j] = mf_knapsack(i - 1, wt, val, j)
        else:
            f[i][j] = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            '''Both the weights and values vectors must be either lists or tuples''')

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            '''The number of weights must be the same as the number of values.\n'''
            f"""But got {num_items} weights and {len(val)} values"""
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                '''All weights must be integers but got weight of '''
                f"""type {type(wt[i])} at index {i}"""
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
UpperCamelCase__: Tuple = [3, 2, 4, 4]
UpperCamelCase__: Any = [4, 3, 2, 3]
UpperCamelCase__: str = 4
UpperCamelCase__: Tuple = 6
UpperCamelCase__: int = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
UpperCamelCase__ , UpperCamelCase__: Union[str, Any] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
UpperCamelCase__ , UpperCamelCase__: List[str] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
| 528
|
"""Ford-Fulkerson maximum flow, using BFS to find augmenting paths (Edmonds-Karp)."""


def bfs(graph, s, t, parent):
    # Return True if there is a path from source `s` to sink `t` in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
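# For this classic CLRS flow network (source 0, sink 5) the printed maximum flow is 23.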
| 528
| 1
|
import random


def rabin_miller(num):
    """Miller-Rabin probabilistic primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
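

# Each witness that fails to prove compositeness is wrong with probability at most 1/4,
# so 5 random witnesses bound the false-positive rate by 4**-5. A quick sketch:
# rabin_miller(97) is True; rabin_miller(91) is almost surely False (91 = 7 * 13).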
def is_prime_low_num(num):
    """Trial division by small primes, then fall back to Rabin-Miller."""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num)


def generate_large_prime(keysize=1024):
    """Return a random prime number with `keysize` bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 605
|
"""simple docstring"""
from __future__ import annotations
import bisect
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int, lowerCamelCase__ : int = 0, lowerCamelCase__ : int = -1 ) -> int:
if hi < 0:
_SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase__ )
while lo < hi:
_SCREAMING_SNAKE_CASE : Optional[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
_SCREAMING_SNAKE_CASE : int = mid + 1
else:
_SCREAMING_SNAKE_CASE : Optional[int] = mid
return lo
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int, lowerCamelCase__ : int = 0, lowerCamelCase__ : int = -1 ) -> int:
if hi < 0:
_SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase__ )
while lo < hi:
_SCREAMING_SNAKE_CASE : str = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
_SCREAMING_SNAKE_CASE : Union[str, Any] = mid + 1
else:
_SCREAMING_SNAKE_CASE : int = mid
return lo
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int, lowerCamelCase__ : int = 0, lowerCamelCase__ : int = -1 ) -> None:
sorted_collection.insert(bisect_left(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int, lowerCamelCase__ : int = 0, lowerCamelCase__ : int = -1 ) -> None:
sorted_collection.insert(bisect_right(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int ) -> int | None:
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : int = len(lowerCamelCase__ ) - 1
while left <= right:
_SCREAMING_SNAKE_CASE : Union[str, Any] = left + (right - left) // 2
_SCREAMING_SNAKE_CASE : Dict = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_SCREAMING_SNAKE_CASE : Optional[int] = midpoint - 1
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = midpoint + 1
return None
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int ) -> int | None:
_SCREAMING_SNAKE_CASE : List[str] = bisect.bisect_left(lowerCamelCase__, lowerCamelCase__ )
if index != len(lowerCamelCase__ ) and sorted_collection[index] == item:
return index
return None
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int ) -> int | None:
if right < left:
return None
_SCREAMING_SNAKE_CASE : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, midpoint - 1 )
else:
return binary_search_by_recursion(lowerCamelCase__, lowerCamelCase__, midpoint + 1, lowerCamelCase__ )
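

# Minimal sanity checks (hypothetical data, runnable as-is):
assert binary_search([0, 5, 7, 10, 15], 6) is None
assert binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) == 4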
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter numbers separated by comma:\n''').strip()
lowercase_ : Any = sorted(int(item) for item in user_input.split(''','''))
lowercase_ : str = int(input('''Enter a single number to be found in the list:\n'''))
lowercase_ : List[str] = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 572
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
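

# Usage sketch: MarkupLMConfig() reproduces the markuplm-base defaults above, and any
# field can be overridden, e.g. MarkupLMConfig(max_depth=64, xpath_unit_hidden_size=64).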
| 705
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
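

# Sketch of the derived defaults: RwkvConfig(hidden_size=512) yields
# attention_hidden_size == 512 and intermediate_size == 2048 (4 * hidden_size).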
| 375
| 0
|
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
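

# Worked example with approximate (assumed) values for water: bulk modulus ~2.15e9 Pa
# and density 1000 kg/m^3 give speed_of_sound_in_a_fluid(1000, 2.15e9) ≈ 1466 m/s.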
if __name__ == "__main__":
import doctest
doctest.testmod()
| 630
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') )
rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') )
rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') )
rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.{j}.gamma""", F"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.depthwise_conv.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.norm.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((F"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", F"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((F"""backbone.downsample_layers.{i}.0.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.0.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.weight""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((F"""backbone.downsample_layers.{i}.1.bias""", F"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
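

# The pairs above translate mmsegmentation parameter names into the transformers layout,
# e.g. ("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight").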
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth',
'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth',
'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth',
'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth',
'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
__A : Optional[Any] = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
__A : Dict = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
__A : List[Any] = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
__A : Union[str, Any] = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
__A : List[Any] = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , a , atol=1e-4 )
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase : Optional[int] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 239
| 0
|
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


# Method names below are a reconstruction of the consistency-model scheduler tests.
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
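

# The sums/means asserted above are seed-0 regression targets: they pin down the
# scheduler's numerical behavior for one-step and multistep sampling, not analytic values.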
| 707
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    # See all possible arguments in src/transformers/training_args.py.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.config_overrides is not None:
        logger.info(f"Overriding config: {model_args.config_overrides}")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
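
# Typical invocation (paths and flags hypothetical):
# python run_mlm_wwm.py --model_name_or_path bert-base-chinese --train_file train.txt \
#     --train_ref_file ref.txt --do_train --output_dir ./out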
| 102
| 0
|
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
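

# Each `register_subcommand` call attaches a subparser whose `func` default builds the
# command object, so e.g. `transformers-cli env` dispatches to EnvironmentCommand.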
if __name__ == "__main__":
main()
| 85
|
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the 20th century."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # the first Sunday of 1901 is January 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
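
# Project Euler problem 19: the expected printed count is 171.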
| 154
| 0
|
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first backtracking; prune when the path overshoots or can no longer reach max_sum."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
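
# For these inputs the depth-first order yields [3, 4, 2] and [4, 5] as the printed subsets.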
| 408
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 408
| 1
|
'''M-CLIP: an XLM-R text encoder with a linear projection into a CLIP-compatible embedding space.'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


# Class name reconstructed from the upstream M-CLIP code.
class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
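

# The mask-weighted mean pool keeps padded positions out of the sentence embedding;
# the linear layer then maps the pooled vector into the image-embedding space.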
| 369
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowercase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
lowercase__ = spec.loader.load_module()
lowercase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowercase__ = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
lowercase__ = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
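
# The regex requires the markdown link text and URL to agree, e.g.
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)" passes the check.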
| 581
| 0
|
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    # Function name assumed; encode returns (quantized latents, embedding loss, info).
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
return model, global_step
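

# Sketch (hypothetical path): load_model(config, "last.ckpt", gpu=True, eval_mode=True)
# returns the instantiated model together with the checkpoint's recorded global step.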
| 150
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
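# Behaviour sketch: once `_LazyModule` is installed in `sys.modules`, submodules
# are imported only on first attribute access, e.g.
#     from transformers.models.convbert import ConvBertModel  # torch branch loads here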
| 150
| 1
|
'''simple docstring'''
def alternative_string_arrange( first_str , second_str ) -> str:
    '''simple docstring'''
    first_str_length = len(first_str )
    second_str_length = len(second_str )
    output_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(output_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
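    # Worked examples (sketch): characters are interleaved index by index and the
    # longer string's tail is appended once the shorter one runs out.
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
    assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"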
| 109
|
from __future__ import annotations
import numpy as np
def __UpperCamelCase ( table ) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows, columns = np.shape(table )
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
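    # Quick numerical check (sketch): the triangular factors must reproduce the input.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = __UpperCamelCase(example)
    assert np.allclose(lower @ upper, example)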
| 662
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class lowercase( PretrainedConfig ):
    '''simple docstring'''
    model_type = "mgp-str"
    def __init__( self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50_257, num_wordpiece_labels=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1E-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
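# Usage sketch (illustrative only): the config behaves like any PretrainedConfig,
# so defaults can be overridden at construction time, e.g.
#     config = lowercase(max_token_length=32, hidden_size=512)
#     assert config.hidden_size == 512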
| 711
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums(n : int ) -> list:
    """simple docstring"""
    if not isinstance(n , int ):
        raise ValueError("""n must be an integer""" )
    if n <= 0:
        raise ValueError("""n must be >= 0""" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    """simple docstring"""
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
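    # Sanity check (sketch): 5777 is the smallest odd composite that cannot be
    # written as prime + 2 * square, i.e. the published answer to this problem.
    assert solution() == 5777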
| 28
| 0
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wav2vec2( fairseq_model , hf_model ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif name.split("." )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict( dict_path ):
    '''simple docstring'''
    with open(dict_path , "r" , encoding="utf-8" ) as f:
        lines = f.readlines()
        words = [line.split(" " )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
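# Shape of the result (sketch): fairseq dict lines such as "hello 42" become
# consecutive ids after the four special tokens, e.g.
#     {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, ...}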
@torch.no_grad()
def convert_wav2vec2_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    '''simple docstring'''
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path )
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config )
    projection_layer = recursively_load_weights_wav2vec2(model.encoder , hf_encoder )
    hf_decoder = Speech2Text2ForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("embed_out" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , "vocab.json" ) , "w" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path , "vocab.json" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
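    # Example invocation (sketch; all paths below are placeholders):
    #     python <this_script>.py --checkpoint_path /path/to/wav2vec2.pt \
    #         --dict_path /path/to/dict.ltr.txt --pytorch_dump_folder_path ./dump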
| 78
|
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
    '''simple docstring'''
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("RGB" )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key( key ):
    '''simple docstring'''
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*" , "vision_model.encoder" , key )
    if "blocks" in key:
        key = re.sub(R"blocks" , "layers" , key )
    if "attn" in key:
        key = re.sub(R"attn" , "self_attn" , key )
    if "norm1" in key:
        key = re.sub(R"norm1" , "layer_norm1" , key )
    if "norm2" in key:
        key = re.sub(R"norm2" , "layer_norm2" , key )
    if "encoder.norm" in key:
        key = re.sub(R"encoder.norm" , "post_layernorm" , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , key )
    if "encoder.cls_token" in key:
        key = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , key )
    if "self_attn" in key:
        key = re.sub(R"self_attn.proj" , "self_attn.projection" , key )
    return key
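# Example of the mapping performed above (sketch, traced by hand):
#     "visual_encoder.blocks.0.attn.qkv.weight"
#         -> "vision_model.encoder.layers.0.self_attn.qkv.weight"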
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url , image_size=3_84 , vit="base" )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 3_84
    image = load_demo_image(image_size=image_size , device="cpu" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
    input_ids = tokenizer(["a picture of"] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
    out = hf_model.generate(image )
    assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit="base" )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question , return_tensors="pt" ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit="base" )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question , return_tensors="pt" , padding="max_length" , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
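    # Example invocation (sketch; the script file name and dump path are placeholders):
    #     python convert_blip.py --pytorch_dump_folder_path ./blip_dump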
| 78
| 1
|
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--original_config_file""",
type=str,
required=True,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--image_size""",
default=5_12,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    def parse_bool( string ):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'''could not parse string as bool {string}''' )
parser.add_argument(
"""--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool
)
parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 551
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCAmelCase = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCAmelCase = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute( self , predictions , references ):
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
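# Usage sketch (mirrors the docstring example above; requires the
# `math_equivalence` dependency from the hendrycks/math repository):
#     metric = datasets.load_metric("competition_math")
#     results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#     assert results == {"accuracy": 1.0}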
| 551
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class a__ ( unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self , batch_size , length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :] , (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist , expected_dist , atol=1e-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :] , (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None )
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
    def test_min_length_dist_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) , vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores = logits_processor(input_ids , scores , cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
        scores = top_k_warp(input_ids , scores , cur_len=cur_len )
        scores = top_p_warp(input_ids , scores , cur_len=cur_len )
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
    def test_processor_list_jitted( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len ):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len )
            scores = top_k_warp(input_ids , scores , cur_len=cur_len )
            scores = top_p_warp(input_ids , scores , cur_len=cur_len )
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len )
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len )
            return scores
        # with processor list
        def run_processor_list(input_ids , scores , cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids , scores , cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len )
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1e-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 90
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
a = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
a = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
a = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
a = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
a = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
a = ""
a = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
a = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
a = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md , expected_dict ):
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="""root""" ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md , expected_error ):
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="""root""" ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md ):
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md , expected_dict ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / """README.md"""
        with open(path , """w+""" ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / """README.md"""
        with open(path , """w+""" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md , expected_error ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / """README.md"""
        with open(path , """w+""" ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md ):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / """README.md"""
        with open(path , """w+""" ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
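# Usage sketch: the same machinery can be driven directly, e.g.
#     readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#     readme.validate()  # raises ValueError when the card structure is broken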
| 518
| 0
|
'''simple docstring'''
def sum_digits ( num : int ) -> int:
    '''simple docstring'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 1_0
        num //= 1_0
    return digit_sum
def solution ( max_n : int = 1_0_0 ) -> int:
    '''simple docstring'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 ,max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(F'''{solution() = }''')
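    # Sanity check (sketch): the digit sum of the numerator of the 100th
    # convergent of e is the published answer to this problem.
    assert solution() == 272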
| 705
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_padding_different_model_input_name( self ):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCamelCase__WithSpacy ( UpperCamelCase__ ):
'''simple docstring'''
pass
| 123
| 0
|
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _A ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False

    return failures
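# A minimal sketch of the pytest "stats" line handle_test_results() expects
# (the exact line format here is an assumption; real runs may include more fields):
#
#   failed, success, time_spent = handle_test_results("== 2 failed, 30 passed in 0:01:05 ==")
#   # failed == 2, success == 30, time_spent == "0:01:05"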
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
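    # Worked example for the normalization above (input value assumed):
    # _time_spent == "0:01:05" -> time_parts == ["0", "01", "05"],
    # total_secs == 0 * 3600 + 1 * 60 + 5 == 65, so time == "0h1m5s".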
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
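# Pagination sketch for the math above (counts assumed for illustration):
# with total_count == 250 and the first 100 jobs already fetched,
# math.ceil((250 - 100) / 100) == 2, so pages 2 and 3 are requested
# through the "&page={i + 2}" suffix.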
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
A_ : str = get_job_links()
A_ : Dict = retrieve_available_artifacts()
A_ : int = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
A_ : int = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
A_ : Optional[Any] = github_actions_job_links.get('run_doctests')
A_ : str = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
A_ : List[Any] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
A_ , A_ , A_ : Any = handle_test_results(artifact['stats'])
A_ : Union[str, Any] = failed
A_ : int = success
A_ : Optional[Any] = time_spent[1:-1] + ', '
A_ : Optional[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
A_ : Dict = line.replace('FAILED ', '')
A_ : Dict = line.split()[0].replace('\n', '')
if "::" in line:
A_ , A_ : Dict = line.split('::')
else:
A_ , A_ : Dict = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
A_ : List[str] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
A_ : Optional[int] = all_failures[test] if test in all_failures else 'N/A'
A_ : List[str] = failure
break
A_ : Optional[Any] = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
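# Hand-worked LBP sketch (pixel values assumed, not taken from lena.jpg):
# for the 3x3 neighborhood
#     5 4 3
#     4 4 6     center == 4; a neighbor >= center contributes bit 1, else 0.
#     2 1 7
# Reading the neighbors clockwise from the top-left gives bits 1,1,0,1,1,0,0,1,
# i.e. 0b11011001 == 217, so the center's LBP code would be 217 under that
# ordering; the exact ordering/weighting is whatever lbp.local_binary_value uses.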
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias'))
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight'))
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias'))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'''))
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight'''))
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight'''))
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias'''))
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight'''))
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias'''))
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias'))
for source_index, target_index in zip(range(3 , 0 , -1) , range(0 , 3)):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight'''))
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight'''))
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias'''))
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight'))
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias'))
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias'''))
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias'''))
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias'''))
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias'''))
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias'''))
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias'''))
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias'''))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight'))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias'))
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias'))
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight'))
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias'))
for i in range(3):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias'''))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    # target key strings below are reconstructed from the rename table above
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
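    # Example invocation (the script file name and paths are placeholders, not verified):
    #   python convert_maskformer_checkpoint.py \
    #       --model_name maskformer-swin-tiny-ade \
    #       --checkpoint_path /path/to/model.pkl \
    #       --pytorch_dump_folder_path /path/to/output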
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int):
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
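# Doctest-style sketch of the two bound functions (inputs assumed):
#   >>> bisect_left([0, 5, 7, 10, 15], 6)
#   2
#   >>> bisect_right([0, 5, 7, 7, 10], 7)
#   4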
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Optional[Any] = 0
snake_case_ :Dict = 0
snake_case_ :Any = {}
def _a ( self , a ):
"""simple docstring"""
if vertex not in self.adjacency:
snake_case_ :str = {}
self.num_vertices += 1
def _a ( self , a , a , a ):
"""simple docstring"""
self.add_vertex(a )
self.add_vertex(a )
if head == tail:
return
snake_case_ :List[Any] = weight
snake_case_ :str = weight
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[int] = self.get_edges()
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Any = edge
edges.remove((tail, head, weight) )
for i in range(len(a ) ):
snake_case_ :Dict = list(edges[i] )
edges.sort(key=lambda a : e[2] )
for i in range(len(a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
snake_case_ :Tuple = edges[i][2] + 1
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Any = edge
snake_case_ :Dict = weight
snake_case_ :int = weight
def __str__( self ):
"""simple docstring"""
snake_case_ :List[Any] = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
snake_case_ :List[Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("\n" )
def _a ( self ):
"""simple docstring"""
snake_case_ :int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def _a ( self ):
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def _a ( a=None , a=None ):
"""simple docstring"""
snake_case_ :Optional[Any] = Graph()
if vertices is None:
snake_case_ :int = []
if edges is None:
snake_case_ :Any = []
for vertex in vertices:
g.add_vertex(a )
for edge in edges:
g.add_edge(*a )
return g
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Dict = {}
snake_case_ :Optional[int] = {}
def __len__( self ):
"""simple docstring"""
return len(self.parent )
def _a ( self , a ):
"""simple docstring"""
if item in self.parent:
return self.find(a )
snake_case_ :Optional[Any] = item
snake_case_ :str = 0
return item
def _a ( self , a ):
"""simple docstring"""
if item not in self.parent:
return self.make_set(a )
if item != self.parent[item]:
snake_case_ :Optional[int] = self.find(self.parent[item] )
return self.parent[item]
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :Any = self.find(a )
snake_case_ :str = self.find(a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
snake_case_ :Union[str, Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
snake_case_ :Any = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
snake_case_ :Union[str, Any] = roota
return roota
return None
@staticmethod
def _a ( a ):
"""simple docstring"""
snake_case_ :Any = graph.num_vertices
snake_case_ :Any = Graph.UnionFind()
snake_case_ :Optional[Any] = []
while num_components > 1:
snake_case_ :List[Any] = {}
for vertex in graph.get_vertices():
snake_case_ :str = -1
snake_case_ :Tuple = graph.get_edges()
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
snake_case_ , snake_case_ , snake_case_ :Optional[int] = edge
snake_case_ :List[Any] = union_find.find(a )
snake_case_ :Union[str, Any] = union_find.find(a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case_ :Optional[int] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
snake_case_ :Tuple = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
snake_case_ , snake_case_ , snake_case_ :List[Any] = cheap_edge[vertex]
if union_find.find(a ) != union_find.find(a ):
union_find.union(a , a )
mst_edges.append(cheap_edge[vertex] )
snake_case_ :List[Any] = num_components - 1
snake_case_ :Any = Graph.build(edges=a )
return mst
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
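# Worked example for the arrays above: with start = [1, 3, 0, 5, 8, 5] and
# finish = [2, 4, 6, 7, 9, 9], the greedy scan selects activities 0, 1, 3, 4,
# since each selected start time is >= the finish time of the previous pick.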
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # flag name assumed; it was obfuscated in the source
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1_000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
"""simple docstring"""
def solution(n: int = 1_000) -> int:
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0

        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
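# Sanity check (well-known value): the first Fibonacci number with 3 digits
# is F(12) = 144, so solution(3) == 12.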
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
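# Sanity check from the Project Euler 86 statement: M = 99 admits exactly
# 1975 cuboids with an integer shortest path, so solution(1975) should
# return 100, the first M whose cumulative count exceeds the limit.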
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = None
ops.enable_eager_execution_internal()
lowerCamelCase_ = tf.config.list_physical_devices('CPU' )
if len(A_ ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
lowerCamelCase_ = tf.config.list_logical_devices(device_type='CPU' )
lowerCamelCase_ = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
lowerCamelCase_ = GradientAccumulator()
lowerCamelCase_ = tf.Variable([4.0, 3.0] )
lowerCamelCase_ , lowerCamelCase_ = create_optimizer(5E-5 , 10 , 5 )
lowerCamelCase_ = tf.Variable([0.0, 0.0] , trainable=A_ )
def accumulate_on_replica(A_ : Any ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(A_ : List[Any] , A_ : Tuple ):
with strategy.scope():
lowerCamelCase_ = strategy.experimental_local_results(A_ )
local_variables[0].assign(A_ )
local_variables[1].assign(A_ )
strategy.run(A_ , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(A_ )
def _check_local_values(A_ : List[Any] , A_ : str ):
lowerCamelCase_ = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , A_ , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , A_ , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
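# Arithmetic behind the first test (an illustration): the accumulator sums the
# per-call gradients, so [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] == [-2.0, 5.0],
# exactly the value asserted after three accumulation steps.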
| 651
| 0
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilenet_v1_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the TensorFlow weights into our MobileNetV1 structure."""
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase : int = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
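# Example invocation (hypothetical script filename and paths, shown only as an
# illustration of the arguments defined above):
#   python convert_mobilenet_v1_checkpoint.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt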
| 444
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
lowerCAmelCase : Dict = """docs/source/en/_toctree.yml"""
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs, which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase : List[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
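# Shape of the toctree entries this script cleans (an illustrative sketch, not a
# copy of the real file):
#   - title: Schedulers
#     sections:
#       - local: api/schedulers/overview
#         title: Overview
#       - local: api/schedulers/ddim
#         title: DDIM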
| 444
| 1
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
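# For the 50-capacity case above, the optimum keeps the 20- and 30-weight items
# (values 100 + 120 = 220) and drops the 10-weight one, matching the assertion.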
| 714
|
'''simple docstring'''
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
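# Usage note (an illustration): the right bound is exclusive, so a whole list is
# sorted with quick_sort_random(a, 0, len(a)); e.g. [3, 1, 2] becomes [1, 2, 3] in place.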
| 418
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
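# Hypothetical usage sketch (the call pattern follows PipelineTool's __call__;
# treat it as an illustration rather than documented API):
#   tool = TextClassificationTool()
#   tool("This new movie is awesome", ["positive", "negative"])  # -> "positive"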
| 673
| 1
|
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the ELU activation: x for x > 0, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
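# Spot check (an illustration): exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), 1.0)
# gives approximately [-0.6321, 0.0, 2.0], since 1 * (exp(-1) - 1) ≈ -0.6321.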
| 710
|
def solution(limit: int = 1_000_000) -> int:
    """Return the starting number below `limit` that produces the longest Collatz chain."""
    pre_counter = 1
    largest_number = 1
    counters = {1: 1}

    for start in range(2, limit):
        counter = 0
        number = start
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if start not in counters:
            counters[start] = counter

        if counter > pre_counter:
            largest_number = start
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
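# Worked example: starting from 13 the chain is 13 -> 40 -> 20 -> 10 -> 5 -> 16
# -> 8 -> 4 -> 2 -> 1, ten terms in all, so counters[13] ends up as 10.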
| 619
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 1
|
def greatest_common_divisor(x: int, y: int) -> int:
    """Compute the gcd with Euclid's algorithm."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Compute the least common multiple via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'{solution() = }')
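# Worked example: solution(10) == 2520, the smallest number evenly divisible by
# every integer from 1 to 10 (the Project Euler problem 5 statement).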
| 712
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Find the area of the grid whose rectangle count is closest to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'{solution() = }')
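# Worked example from the problem statement: a 3x2 grid contains T(3) * T(2) =
# 6 * 3 = 18 rectangles, where T(n) = n * (n + 1) / 2 is the nth triangle number.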
| 568
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """List of FlaxLogitsProcessor/FlaxLogitsWarper objects, applied in order to the scores."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """Logits warper for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps the smallest set of tokens whose cumulative probability exceeds `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """Logits warper that keeps only the `top_k` highest probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces the specified token as the last token when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor enforcing a min-length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor suppressing a list of tokens as soon as generation starts at `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor that forces specific token ids at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """Logits processor implementing Whisper's timestamp rules during generation."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
| 636
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 368
|
"""Z-function (Z-algorithm) for counting pattern occurrences in linear time."""


def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
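# Example (an illustration): z_function("abab") returns [0, 0, 2, 0] because the
# prefix "ab" reappears at index 2, and find_pattern("ab", "abab") returns 2.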
| 368
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
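# Usage sketch (an illustration): DetaConfig() builds a default configuration with
# a ResNet backbone; under the defaults above, DetaConfig().to_dict()["d_model"] == 256.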
| 10
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney").T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
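# Usage sketch (hypothetical one-second silent waveform, illustration only):
#   extractor = TvltFeatureExtractor()
#   batch = extractor(np.zeros(44100, dtype=np.float32), sampling_rate=44100, return_tensors="np")
#   batch["audio_values"]  # roughly (1, 1, padded_time, 128) log-mel features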
| 28
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """Configuration class for the ViT MAE model."""

    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
                 decoder_num_attention_heads=16, decoder_hidden_size=512, decoder_num_hidden_layers=8,
                 decoder_intermediate_size=2048, mask_ratio=0.75, norm_pix_loss=False, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
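
# Minimal usage sketch (assumes the standard `transformers` package; the model is
# randomly initialized here, not pretrained):
#
#     from transformers import ViTMAEForPreTraining
#
#     config = ViTMAEConfig(mask_ratio=0.6)
#     model = ViTMAEForPreTraining(config)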
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(self, hparams, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that checks if the key to handle is registered
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """
        Finds and returns the selected character if it exists in the handler
        """
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to be the key handler"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
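
# Minimal usage sketch (hedged: assumes KEYMAP contains an "up" entry, as in the
# accompanying `keymap` module):
#
#     @register
#     class Menu:
#         @mark(KEYMAP["up"])
#         def move_up(cls):
#             print("moved up")
#
#     Menu.handle_input()  # reads one keypress and dispatches to the matching handler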
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """Plays FizzBuzz, starting at `number` and stopping after `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
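
# Worked example: fizz_buzz(1, 15) returns
# "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "
# (a space is appended after every entry, including the last).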
'''simple docstring'''
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
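
# Worked example: 28 = 2**2 * 7, so count_divisors(28) = (2 + 1) * (1 + 1) = 6
# (its divisors are 1, 2, 4, 7, 14 and 28).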
def solution() -> int:
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu",
                 initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None,
                 out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
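
# Minimal usage sketch (assumes the standard `transformers` package; the model is
# randomly initialized here, not pretrained):
#
#     from transformers import ConvNextV2Model
#
#     config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
#     model = ConvNextV2Model(config)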
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires a calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def _a (self , _lowerCamelCase="./" ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.eval_dataset
UpperCAmelCase__ : int = self.get_eval_dataloader(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = next(iter(_lowerCamelCase ) )
# saving device - to make it consistent
UpperCAmelCase__ : int = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
UpperCAmelCase__ : List[Any] = tuple(v.to(_lowerCamelCase ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Tuple = self.model.to(_lowerCamelCase )
model.eval()
model.float()
UpperCAmelCase__ : str = model.module if hasattr(_lowerCamelCase , """module""" ) else model
quant_trainer.configure_model(_lowerCamelCase , self.quant_trainer_args )
UpperCAmelCase__ : List[str] = os.path.join(_lowerCamelCase , """model.onnx""" )
logger.info(F"""exporting model to {output_model_file}""" )
UpperCAmelCase__ : Optional[Any] = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , export_params=_lowerCamelCase , opset_version=13 , do_constant_folding=_lowerCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=_lowerCamelCase , )
logger.info("""onnx export finished""" )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072,
                 hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4,
                 initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False,
                 tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
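
# Minimal usage sketch (assumes the standard `transformers` package; the model is
# randomly initialized here, not pretrained):
#
#     from transformers import FNetModel
#
#     config = FNetConfig(num_hidden_layers=6)
#     model = FNetModel(config)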
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_plbart'] = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
__UpperCAmelCase = "▁"
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = AlbertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Any = (
AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ , normalized=SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else mask_token
)
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
snake_case: List[str] = do_lower_case
snake_case: Optional[int] = remove_space
snake_case: Any = keep_accents
snake_case: str = vocab_file
snake_case: str = False if not self.vocab_file else True
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: Dict = [self.sep_token_id]
snake_case: str = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: str = [self.sep_token_id]
snake_case: Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: Optional[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
_lowerCamelCase : Optional[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
| 630
| 0
|
"""simple docstring"""
def solution(limit: int = 50000000) -> int:
    """simple docstring"""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
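# Added illustration (not in the original file): a naive cross-check of
# `solution` for small limits; the 5_000 default is an arbitrary choice.
def _brute_force_count(small_limit: int = 5_000) -> int:
    primes = [
        p
        for p in range(2, int(small_limit**0.5) + 1)
        if all(p % q for q in range(2, int(p**0.5) + 1))
    ]
    # every prime square + prime cube + prime fourth power below the limit
    found = {
        p**2 + q**3 + r**4
        for p in primes
        for q in primes
        for r in primes
        if p**2 + q**3 + r**4 < small_limit
    }
    return len(found)
# e.g. _brute_force_count(5_000) should agree with solution(5_000)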
if __name__ == "__main__":
print(F"{solution() = }")
| 707
|
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = """ernie_m"""
UpperCamelCase__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size = 250_002 , hidden_size = 768 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 3_072 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 514 , initializer_range = 0.02 , pad_token_id = 1 , layer_norm_eps = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
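# Illustrative usage sketch (added; assumes the class above is exposed as
# ErnieMConfig -- not part of the original file):
#   config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
#   assert config.model_type == "ernie_m"
#   # the attribute_map above aliases config.num_classes to config.num_labels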
| 120
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase (UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = StableDiffusionXLImgaImgPipeline
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
_snake_case = PipelineTesterMixin.required_optional_params - {"""latents"""}
_snake_case = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
_snake_case = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
snake_case : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=A , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
snake_case : Optional[Any] = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
snake_case : Tuple = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
snake_case : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=3_2 , )
snake_case : Tuple = CLIPTextModel(A )
snake_case : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=A )
snake_case : Union[str, Any] = CLIPTextModelWithProjection(A )
snake_case : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=A )
snake_case : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> str:
snake_case : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A )
snake_case : Optional[Any] = image / 2 + 0.5
if str(A ).startswith("""mps""" ):
snake_case : Union[str, Any] = torch.manual_seed(A )
else:
snake_case : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
snake_case : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Dict = self.get_dummy_components()
snake_case : Any = StableDiffusionXLImgaImgPipeline(**A )
snake_case : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Dict = self.get_dummy_inputs(A )
snake_case : Union[str, Any] = sd_pipe(**A ).images
snake_case : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case : str = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
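        # Added note: the 3x3 corner slice checked above is a cheap regression
        # fingerprint; the hard-coded expected values are only meaningful for the
        # seeded dummy weights built in the components method above.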
def UpperCAmelCase ( self ) -> Dict:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : List[Any] = self.get_dummy_components()
snake_case : str = StableDiffusionXLImgaImgPipeline(**A )
snake_case : Any = sd_pipe.to(A )
snake_case : List[str] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self , A , A="cpu" , A=torch.floataa , A=0 ) -> Union[str, Any]:
snake_case : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
snake_case : Optional[int] = np.random.RandomState(A ).standard_normal((1, 4, 6_4, 6_4) )
snake_case : Any = torch.from_numpy(A ).to(device=A , dtype=A )
snake_case : Tuple = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
snake_case : int = self.get_inputs(A )
snake_case : List[Any] = pipe(**A ).images
snake_case : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 587
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """camembert"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
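# Added usage sketch (hypothetical; assumes the ONNX config class above is
# exposed as CamembertOnnxConfig):
#   onnx_config = CamembertOnnxConfig(config)
#   onnx_config.inputs  # -> {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}}
# The dynamic axes mark which tensor dimensions may vary at export time.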
| 587
| 1
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
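# Minimal usage sketch (added; the feature spec and path are hypothetical):
#   features = datasets.Features({"text": datasets.Value("string")})
#   ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
#   assert len(ds) == 10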
| 517
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow's C++ log output (this is what the `os` import above is for)
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 517
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] ,*UpperCamelCase : int ,**UpperCamelCase : Any ) -> None:
        warnings.warn(
            'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use LayoutLMv2ImageProcessor instead.' , FutureWarning , )
super().__init__(*UpperCamelCase ,**UpperCamelCase )
| 125
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = RobertaTokenizer
lowerCAmelCase__ : Tuple = RobertaTokenizerFast
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : Optional[int] = {"cls_token": "<s>"}
def _lowerCamelCase ( self : Any ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowercase : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
_lowercase : Tuple = dict(zip(UpperCamelCase ,range(len(UpperCamelCase ) ) ) )
_lowercase : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
_lowercase : List[str] = {'unk_token': '<unk>'}
_lowercase : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
_lowercase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
def _lowerCamelCase ( self : Dict ,**UpperCamelCase : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**UpperCamelCase )
def _lowerCamelCase ( self : int ,**UpperCamelCase : List[Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCamelCase )
def _lowerCamelCase ( self : Union[str, Any] ,UpperCamelCase : Tuple ) -> Union[str, Any]:
_lowercase : int = 'lower newer'
_lowercase : Tuple = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
_lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
_lowercase : Optional[Any] = 'lower newer'
_lowercase : Any = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
_lowercase : Optional[int] = tokenizer.tokenize(UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowercase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) ,UpperCamelCase )
def _lowerCamelCase ( self : Any ) -> Union[str, Any]:
_lowercase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=UpperCamelCase ) ,[0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=UpperCamelCase ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,)
@slow
def _lowerCamelCase ( self : Any ) -> Any:
_lowercase : Union[str, Any] = self.tokenizer_class.from_pretrained('roberta-base' )
_lowercase : List[str] = tokenizer.encode('sequence builders' ,add_special_tokens=UpperCamelCase )
_lowercase : Any = tokenizer.encode('multi-sequence build' ,add_special_tokens=UpperCamelCase )
_lowercase : Optional[Any] = tokenizer.encode(
'sequence builders' ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : List[Any] = tokenizer.encode(
'sequence builders' ,'multi-sequence build' ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
_lowercase : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ,UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowerCamelCase ( self : int ) -> str:
_lowercase : Any = self.get_tokenizer()
_lowercase : Optional[Any] = 'Encode this sequence.'
_lowercase : Union[str, Any] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_lowercase : Dict = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : List[str] = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase ,add_prefix_space=UpperCamelCase )
_lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCamelCase ,UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_lowercase : Optional[Any] = tokenizer.encode(UpperCamelCase ,add_special_tokens=UpperCamelCase )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCamelCase ,UpperCamelCase )
# Testing spaces after special tokens
_lowercase : str = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(UpperCamelCase ,lstrip=UpperCamelCase ,rstrip=UpperCamelCase )} ) # mask token has a left space
_lowercase : int = tokenizer.convert_tokens_to_ids(UpperCamelCase )
_lowercase : Any = 'Encode <mask> sequence'
_lowercase : Dict = 'Encode <mask>sequence'
_lowercase : int = tokenizer.encode(UpperCamelCase )
_lowercase : Optional[int] = encoded.index(UpperCamelCase )
_lowercase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCamelCase ,UpperCamelCase )
_lowercase : Dict = tokenizer.encode(UpperCamelCase )
_lowercase : Optional[Any] = encoded.index(UpperCamelCase )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCamelCase ,UpperCamelCase )
def _lowerCamelCase ( self : int ) -> Optional[Any]:
pass
def _lowerCamelCase ( self : Tuple ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
_lowercase : Any = self.tokenizer_class.from_pretrained(UpperCamelCase ,**UpperCamelCase )
_lowercase : Any = 'A, <mask> AllenNLP sentence.'
_lowercase : Optional[int] = tokenizer_r.encode_plus(UpperCamelCase ,add_special_tokens=UpperCamelCase ,return_token_type_ids=UpperCamelCase )
_lowercase : Any = tokenizer_p.encode_plus(UpperCamelCase ,add_special_tokens=UpperCamelCase ,return_token_type_ids=UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,)
_lowercase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowercase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
UpperCamelCase ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def _lowerCamelCase ( self : Tuple ) -> List[str]:
for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ):
_lowercase : str = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowercase : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] ,UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] ,UpperCamelCase )
def _lowerCamelCase ( self : List[Any] ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowercase : List[Any] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_lowercase : Optional[int] = F'''{text_of_1_token} {text_of_1_token}'''
_lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Dict = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ) + 1, len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Optional[int] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(UpperCamelCase ), len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Optional[int] = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowercase : Any = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : int = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ) + 1, 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : str = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : List[str] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
_lowercase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCamelCase ,use_fast=UpperCamelCase ,add_prefix_space=UpperCamelCase ,trim_offsets=UpperCamelCase )
_lowercase : Union[str, Any] = tokenizer_r(UpperCamelCase ,return_offsets_mapping=UpperCamelCase ,add_special_tokens=UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(UpperCamelCase ), 1 + len(UpperCamelCase ) + 1 + len(UpperCamelCase )) ,)
| 125
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 708
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'https://google.com{link.get("href")}')
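# Added usage sketch: invoked as `python <this_script>.py "query terms"`, the code
# above fetches the Google results page, saves it to project1a.html, and opens the
# first five result links in the default browser (the "Maps" link is opened by its
# raw href, all others relative to google.com).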
| 273
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
SCREAMING_SNAKE_CASE : int = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
SCREAMING_SNAKE_CASE : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase :
'''simple docstring'''
lowercase : Optional[str] =field(
default=lowercase__ , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase__ )} , )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowercase : bool =field(
default=lowercase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowercase : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase : bool =field(
default=lowercase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def UpperCamelCase ( self ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )
@dataclass
class UpperCamelCase :
'''simple docstring'''
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
lowercase : Optional[str] =field(default=lowercase__ , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
lowercase : Optional[str] =field(
default=lowercase__ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
lowercase : bool =field(
default=lowercase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase : Optional[int] =field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
lowercase : Optional[int] =field(
default=lowercase__ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
lowercase : Optional[int] =field(
default=lowercase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowercase : float =field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
lowercase : bool =field(
default=lowercase__ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def UpperCamelCase ( self ):
if self.train_file is not None:
lowercase_ :Optional[int] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
lowercase_ :Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    '''simple docstring'''
    with open(ref_file , '''r''' , encoding='''utf-8''' ) as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['''chinese_ref'''] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
lowercase_ :Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ :Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , _a )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowercase_ :List[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
lowercase_ :Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"train[:{data_args.validation_split_percentage}%]" , )
lowercase_ :List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"train[{data_args.validation_split_percentage}%:]" , )
else:
lowercase_ :Tuple = {}
if data_args.train_file is not None:
lowercase_ :Optional[Any] = data_args.train_file
if data_args.validation_file is not None:
lowercase_ :Optional[int] = data_args.validation_file
lowercase_ :Dict = data_args.train_file.split('''.''' )[-1]
if extension == "txt":
lowercase_ :Any = '''text'''
lowercase_ :Optional[Any] = load_dataset(_a , data_files=_a )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ :Any = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase_ :List[Any] = AutoConfig.from_pretrained(model_args.config_name , **_a )
elif model_args.model_name_or_path:
lowercase_ :List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **_a )
else:
lowercase_ :List[str] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
lowercase_ :Optional[int] = {
'''cache_dir''': model_args.cache_dir,
'''use_fast''': model_args.use_fast_tokenizer,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
lowercase_ :Tuple = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_a )
elif model_args.model_name_or_path:
lowercase_ :Tuple = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_a )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )
if model_args.model_name_or_path:
lowercase_ :Optional[int] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_a , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowercase_ :Optional[int] = AutoModelForMaskedLM.from_config(_a )
model.resize_token_embeddings(len(_a ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
lowercase_ :List[Any] = datasets['''train'''].column_names
else:
lowercase_ :List[Any] = datasets['''validation'''].column_names
lowercase_ :Union[str, Any] = '''text''' if '''text''' in column_names else column_names[0]
lowercase_ :Optional[Any] = '''max_length''' if data_args.pad_to_max_length else False
def tokenize_function(_a ):
# Remove empty lines
lowercase_ :Optional[int] = [line for line in examples['''text'''] if len(_a ) > 0 and not line.isspace()]
return tokenizer(examples['''text'''] , padding=_a , truncation=_a , max_length=data_args.max_seq_length )
lowercase_ :List[Any] = datasets.map(
_a , batched=_a , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
lowercase_ :int = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
lowercase_ :str = add_chinese_references(
tokenized_datasets['''validation'''] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
lowercase_ :Union[str, Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
lowercase_ :int = False
# Data collator
# This one will take care of randomly masking the tokens.
lowercase_ :Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=_a , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase_ :str = Trainer(
model=_a , args=_a , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=_a , data_collator=_a , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
lowercase_ :Optional[Any] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
lowercase_ :Tuple = model_args.model_name_or_path
else:
lowercase_ :Any = None
lowercase_ :int = trainer.train(resume_from_checkpoint=_a )
trainer.save_model() # Saves the tokenizer too for easy upload
lowercase_ :Tuple = os.path.join(training_args.output_dir , '''train_results.txt''' )
if trainer.is_world_process_zero():
with open(_a , '''w''' ) as writer:
logger.info('''***** Train results *****''' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# Evaluation
lowercase_ :Optional[Any] = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase_ :Union[str, Any] = trainer.evaluate()
lowercase_ :str = math.exp(eval_output['''eval_loss'''] )
lowercase_ :int = perplexity
lowercase_ :List[str] = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
if trainer.is_world_process_zero():
with open(_a , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in sorted(results.items() ):
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 257
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient(a: np.ndarray , v: np.ndarray ) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f"{a} is not hermitian."
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    tests()
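# Added note: for Hermitian A the Rayleigh quotient R(A, v) = v*Av / (v*v) is real
# and bounded by the extreme eigenvalues, lambda_min <= R(A, v) <= lambda_max.
# Quick sketch (illustrative values only):
#   rayleigh_quotient(np.eye(2), np.array([[1], [1]]))  # -> array([[1.]])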
| 257
| 1
|
'''simple docstring'''
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
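# Added usage sketch (hypothetical path): load_vocab("prophetnet.tokenizer")
# returns an OrderedDict mapping each token to its 0-based line index, the same
# convention as BERT-style vocab files.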
class UpperCAmelCase_ ( snake_case__ ):
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="[SEP]" , eos_token="[SEP]" , sep_token="[SEP]" , unk_token="[UNK]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
snake_case_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowercase_))
snake_case_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
snake_case_ : Optional[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10):
snake_case_ : List[Any] = F'[unused{i}]'
snake_case_ : str = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
snake_case_ : str = 12
snake_case_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(lowercase_)
def __getstate__( self):
snake_case_ : List[str] = self.__dict__.copy()
snake_case_ : Dict = None
return state
def __setstate__( self , lowercase_):
snake_case_ : List[str] = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece")
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs"):
snake_case_ : Optional[Any] = {}
snake_case_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def snake_case__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_)
if token_ids_a is None:
return ([0] * len(lowercase_)) + [1]
return ([0] * len(lowercase_)) + [1] + ([0] * len(lowercase_)) + [1]
def snake_case__ ( self , lowercase_ , lowercase_ = None):
snake_case_ : int = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def snake_case__ ( self):
return len(self.sp_model) + self.fairseq_offset
def snake_case__ ( self):
snake_case_ : int = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def snake_case__ ( self , lowercase_):
return self.sp_model.encode(lowercase_ , out_type=lowercase_)
def snake_case__ ( self , lowercase_):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ : List[Any] = self.sp_model.PieceToId(lowercase_)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self , lowercase_):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def snake_case__ ( self , lowercase_):
        out_string = "".join(lowercase_).replace(SPIECE_UNDERLINE , " ").strip()
        return out_string
def snake_case__ ( self , lowercase_ , lowercase_ = None):
if not os.path.isdir(lowercase_):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowercase_)
elif not os.path.isfile(self.vocab_file):
with open(lowercase_ , "wb") as fi:
snake_case_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_)
return (out_vocab_file,)
def snake_case__ ( self , lowercase_ , lowercase_ = None):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
snake_case_ : int = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
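    # Added note: for a sequence pair the method above emits
    # `tokens_a [SEP] tokens_b [SEP]`; unlike BERT, no leading [CLS] token is added.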
| 92
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
a_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
a_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
a_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Calculate intersection and union areas between one predicted and one ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Aggregate intersection and union areas over a list of prediction/ground-truth pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU, mean accuracy and overall accuracy for a set of segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
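

# A minimal usage sketch (not part of the metric): for a 2x2 prediction/label
# pair with 3 classes, per-class IoU is simply area_intersect / area_union.
if __name__ == "__main__":
    example_pred = np.array([[0, 1], [2, 2]])
    example_label = np.array([[0, 1], [2, 1]])
    inter, union, _, _ = intersect_and_union(example_pred, example_label, num_labels=3, ignore_index=255)
    print(inter / union)  # -> [1.0, 0.5, 0.5]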
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
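        # Worked example (with the defaults above): a 30x30 image and patch size 2
        # give (30 // 2) * (30 // 2) = 225 patches, so
        # expected_seq_len = 225 + 1 ([CLS]) + 10 detection tokens = 236.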
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    # special case for object-detection head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict
    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    # YOLOS does not use inputs_embeds
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
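

# Sketch of the idea behind post_process_object_detection (illustrative only, not
# the library's implementation): softmax the logits per detection token, drop the
# trailing "no object" class, and keep detections whose best score clears the
# threshold.
#
#   probs = outputs.logits.softmax(-1)[0, :, :-1]
#   scores, labels = probs.max(-1)
#   keep = scores > 0.3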
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
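

# Note (a sketch of the behavior the tests above rely on): TvltProcessor
# dispatches `audio=...` to the feature extractor and `images=...` to the image
# processor, which is why the combined call returns the union of both sets of
# model input names.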
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
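

# Background sketch for test_jit_compilation above: jax.jit traces the wrapped
# function once and compiles it with XLA, so the JIT-enabled and JIT-disabled
# paths should produce outputs of identical shape, e.g.:
#
#   doubled = jax.jit(lambda x: x * 2)
#   assert doubled(np.ones(3)).shape == (3,)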
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
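

# A standalone sketch of what StratifiedKFold yields (not used by the script):
# each split is a pair of index arrays with class proportions preserved, which
# get_fold_dataloaders above turns into train/validation DataLoaders.
#
#   kfold = StratifiedKFold(n_splits=3)
#   labels = np.array([0, 0, 0, 1, 1, 1])
#   for train_idxs, valid_idxs in kfold.split(np.zeros(len(labels)), labels):
#       ...  # e.g. train_idxs=[1 2 4 5], valid_idxs=[0 3]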
def training_function(config, args):
    """Train one model per fold with StratifiedKFold and ensemble the fold predictions."""
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big endian to little endian."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as 8 little-endian hex digits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Convert the message to a bit string and pad it to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as 32 hex characters (bytes)."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
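
    # Sanity-check sketch (assumes only the standard library): the pure-Python
    # digest should agree with hashlib's MD5 for the same input.
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")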
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
SCREAMING_SNAKE_CASE__ : Tuple = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
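
# Note (sketch): the lazy pattern above means importing this package does not pull
# in torch; the first attribute access (e.g. VanModel) makes _LazyModule import
# modeling_van on demand.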
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
_lowerCAmelCase : str = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
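

# Note: with do_sample=False, generate() performs greedy decoding, so the
# continuation of "the president is" is deterministic and the expected ids
# above are reproducible.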
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase : Tuple = False
lowerCAmelCase : str = True
lowerCAmelCase : List[Any] = False
if __name__ == "__main__":
lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCAmelCase : Optional[int] = parser.parse_args()
lowerCAmelCase : int = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowerCAmelCase : int = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
lowerCAmelCase : Optional[Any] = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowerCAmelCase : int = reader.read()
lowerCAmelCase : List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowerCAmelCase : str = UNetaDModel(**config)
else:
lowerCAmelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowerCAmelCase : Dict = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase : Union[str, Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase : str = config[key]
del config[key]
lowerCAmelCase : Optional[int] = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
lowerCAmelCase : Dict = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
state_dict = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
new_state_dict = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
has_changed = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
new_state_dict[""".""".join([new_key] + param_key.split(""".""")[1:])] = param_value
has_changed = True
if not has_changed:
new_state_dict[param_key] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
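# Example invocation (a hedged sketch: the script filename and repo path are
# illustrative; the repo must contain the config/weights this script expects):
# python change_naming_configs_and_checkpoints.py --repo_path ./ddpm-cifar10-32 --dump_path ./ddpm-cifar10-32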
| 630
| 1
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
'''Yield Fibonacci numbers indefinitely.'''
a, b = 0, 1
while True:
a, b = b, a + b
yield b
def solution(n: int = 10_00) -> int:
'''Return the index of the first term in the Fibonacci sequence to contain n digits.'''
answer = 1
gen = fibonacci_generator()
while len(str(next(gen))) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
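# A quick hand-check of the digit-count logic (names are from this file):
# 144 is the twelfth Fibonacci term and the first with three digits,
# so solution(3) returns 12.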
| 70
|
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig( datasets.BuilderConfig ):
features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame" , partition_order: List[int] , ):
import pyspark
def generate_fn():
df_with_partition_id = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
partition_df = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
rows = partition_df.collect()
row_id = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable( _BaseExamplesIterable ):
def __init__( self , df: "pyspark.sql.DataFrame" , partition_order=None , ):
'''simple docstring'''
self.df = df
self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : Tuple ):
'''simple docstring'''
yield from self.generate_examples_fn()
def shuffle_data_sources( self , generator: np.random.Generator ) -> "SparkExamplesIterable":
'''simple docstring'''
partition_order = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(partition_order )
return SparkExamplesIterable(self.df , partition_order=partition_order )
def shard_data_sources( self , worker_id: int , num_workers: int ) -> "SparkExamplesIterable":
'''simple docstring'''
partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
return SparkExamplesIterable(self.df , partition_order=partition_order )
@property
def n_shards( self ) -> int:
'''simple docstring'''
return len(self.partition_order )
class Spark( datasets.DatasetBuilder ):
BUILDER_CONFIG_CLASS = SparkConfig
def __init__( self , df: "pyspark.sql.DataFrame" , cache_dir: str = None , working_dir: str = None , **config_kwargs , ):
'''simple docstring'''
import pyspark
self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
self.df = df
self._working_dir = working_dir
super().__init__(
cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **config_kwargs , )
def _validate_cache_dir( self ):
'''simple docstring'''
def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=True )
probe_file = os.path.join(self._cache_dir , 'fs_test' + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(probe_file , 'a' )
return [probe_file]
if self._spark.conf.get('spark.master' , '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
probe = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def _info( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self , dl_manager: datasets.download.download_manager.DownloadManager ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _repartition_df_if_needed( self , max_shard_size: int ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(it ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
df_num_rows = self.df.count()
sample_num_rows = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
approx_bytes_per_row = (
self.df.limit(sample_num_rows )
.repartition(1 )
.mapInArrow(get_arrow_batch_size , 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
self.df = self.df.repartition(new_num_partitions )
def _prepare_split_single( self , fpath: str , file_format: str , max_shard_size: int , ):
'''simple docstring'''
import pyspark
writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
embed_local_files = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
features = self.config.features
writer_batch_size = self._writer_batch_size
storage_options = self._fs.storage_options
def write_arrow(it ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
task_id = pyspark.TaskContext().taskAttemptId()
first_batch = next(it , None )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id = 0
writer = writer_class(
features=features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([first_batch] )
writer.write_table(table )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
shard_id += 1
writer = writer_class(
features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
table = pa.Table.from_batches([batch] )
writer.write_table(table )
if writer._num_bytes > 0:
num_examples , num_bytes = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(working_fpath ) ):
dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
shutil.move(file , dest )
stats = (
self.df.mapInArrow(write_arrow , 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _prepare_split( self , split_generator: "datasets.SplitGenerator" , file_format: str = "arrow" , max_shard_size: Optional[Union[str, int]] = None , num_proc: Optional[int] = None , **kwargs , ):
'''simple docstring'''
self._validate_cache_dir()
max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(max_shard_size )
is_local = not is_remote_filesystem(self._fs )
path_join = os.path.join if is_local else posixpath.join
SUFFIX = '-TTTTT-SSSSS-of-NNNNN'
fname = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
fpath = path_join(self._output_dir , fname )
total_num_examples = 0
total_num_bytes = 0
total_shards = 0
task_id_and_num_shards = []
all_shard_lengths = []
for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
(num_examples, num_bytes, num_shards, shard_lengths) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(shard_lengths )
split_generator.split_info.num_examples = total_num_examples
split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
split_generator.split_info.shard_lengths = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
fs = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
task_id: int , shard_id: int , global_shard_id: int , ):
rename(
fs , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , )
args = []
global_shard_id = 0
for i in range(len(task_id_and_num_shards ) ):
task_id , num_shards = task_id_and_num_shards[i]
for shard_id in range(num_shards ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
else:
# don't use any pattern
shard_id = 0
task_id = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SUFFIX , '' ) , )
def _get_examples_iterable_for_split( self , split_generator: "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
'''simple docstring'''
return SparkExamplesIterable(self.df )
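# This builder backs `Dataset.from_spark`; a minimal usage sketch
# (assumes a live SparkSession; the DataFrame contents are illustrative):
# import datasets
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
# ds = datasets.Dataset.from_spark(df)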
| 47
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : Optional[int] =logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] ={
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig( PretrainedConfig ):
model_type = """distilbert"""
attribute_map = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self , vocab_size=30522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.0_2 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.sinusoidal_pos_embds = sinusoidal_pos_embds
self.n_layers = n_layers
self.n_heads = n_heads
self.dim = dim
self.hidden_dim = hidden_dim
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation = activation
self.initializer_range = initializer_range
self.qa_dropout = qa_dropout
self.seq_classif_dropout = seq_classif_dropout
super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
@property
def inputs( self ):
if self.task == "multiple-choice":
dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
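# A minimal sketch of the attribute_map indirection defined above
# (public transformers API; the value is illustrative):
# from transformers import DistilBertConfig
# config = DistilBertConfig(n_layers=3)
# assert config.num_hidden_layers == 3  # resolved through attribute_map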
| 716
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
_import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_yolos'''] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
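# With _LazyModule, submodules load on first attribute access; a small sketch
# (the module path follows the transformers layout and is assumed here):
# import transformers.models.yolos as yolos
# config = yolos.YolosConfig()  # triggers the lazy import of configuration_yolos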
| 269
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : List[Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class CTRLConfig( PretrainedConfig ):
model_type = '''ctrl'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , vocab_size=24_6534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
"""simple docstring"""
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.dff = dff
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
super().__init__(**kwargs )
| 511
|
'''simple docstring'''
import d4rl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 6_4,
"horizon": 3_2,
"num_inference_steps": 2_0,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = "hopper-medium-v2"
SCREAMING_SNAKE_CASE_ = gym.make(env_name)
SCREAMING_SNAKE_CASE_ = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
SCREAMING_SNAKE_CASE_ = env.reset()
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 1_0_0_0
SCREAMING_SNAKE_CASE_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
denorm_actions = pipeline(obs, planning_horizon=3_2)
# execute action in environment
next_observation, reward, terminal, _ = env.step(denorm_actions)
score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
F" {total_score}"
)
# save observations for rendering
rollout.append(next_observation.copy())
obs = next_observation
except KeyboardInterrupt:
pass
print(F"Total reward: {total_reward}")
| 597
| 0
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_="attention"):
"""simple docstring"""
snake_case__ : int = params[F'{prefix}/layers_{i}/{layer_name}/key/kernel']
snake_case__ : str = params[F'{prefix}/layers_{i}/{layer_name}/out/kernel']
snake_case__ : Tuple = params[F'{prefix}/layers_{i}/{layer_name}/query/kernel']
snake_case__ : List[str] = params[F'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def t5x_mlp_lookup(params , i , prefix , split_mlp_wi=False):
"""simple docstring"""
if split_mlp_wi:
wi_0 = params[F'{prefix}/layers_{i}/mlp/wi_0/kernel']
wi_1 = params[F'{prefix}/layers_{i}/mlp/wi_1/kernel']
wi = (wi_0, wi_1)
else:
wi = params[F'{prefix}/layers_{i}/mlp/wi/kernel']
wo = params[F'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def t5x_layer_norm_lookup(params , i , prefix , layer_name ):
"""simple docstring"""
return params[F'{prefix}/layers_{i}/{layer_name}/scale']
def convert_t5x_to_pytorch(variables , *, num_layers , is_encoder_only ):
"""simple docstring"""
old = traverse_util.flatten_dict(variables["""target"""])
old = {"""/""".join(k): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
split_mlp_wi = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , split_mlp_wi)
new = collections.OrderedDict()
# Shared embeddings.
new["""shared.weight"""] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(num_layers):
# Block i, layer 0 (Self Attention).
layer_norm = t5x_layer_norm_lookup(old , i , """encoder""" , """pre_attention_layer_norm""")
k , o , q , v = t5x_attention_lookup(old , i , """encoder""" , """attention""")
new[F'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
new[F'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
new[F'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
new[F'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
new[F'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
# Block i, layer 1 (MLP).
layer_norm = t5x_layer_norm_lookup(old , i , """encoder""" , """pre_mlp_layer_norm""")
wi , wo = t5x_mlp_lookup(old , i , """encoder""" , split_mlp_wi)
new[F'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
if split_mlp_wi:
new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
else:
new[F'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
new[F'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = old[
"""encoder/relpos_bias/rel_embedding"""
].T
new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(num_layers):
# Block i, layer 0 (Self Attention).
layer_norm = t5x_layer_norm_lookup(old , i , """decoder""" , """pre_self_attention_layer_norm""")
k , o , q , v = t5x_attention_lookup(old , i , """decoder""" , """self_attention""")
new[F'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
new[F'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
new[F'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
new[F'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
new[F'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
# Block i, layer 1 (Cross Attention).
layer_norm = t5x_layer_norm_lookup(old , i , """decoder""" , """pre_cross_attention_layer_norm""")
k , o , q , v = t5x_attention_lookup(old , i , """decoder""" , """encoder_decoder_attention""")
new[F'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
new[F'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
new[F'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
new[F'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
new[F'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
# Block i, layer 2 (MLP).
layer_norm = t5x_layer_norm_lookup(old , i , """decoder""" , """pre_mlp_layer_norm""")
wi , wo = t5x_mlp_lookup(old , i , """decoder""" , split_mlp_wi)
new[F'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
if split_mlp_wi:
new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
else:
new[F'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
new[F'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case__ : Any = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params , is_encoder_only ):
"""simple docstring"""
state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""")
state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
return state_dict
def load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only ):
"""simple docstring"""
variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
converted = convert_t5x_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only)
state_dict = make_state_dict(converted , is_encoder_only)
model.load_state_dict(state_dict , strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False ):
"""simple docstring"""
config = T5Config.from_json_file(config_file)
print(F'Building PyTorch model from configuration: {config}')
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
model = T5EncoderModel(config)
else:
model = T5ForConditionalGeneration(config)
# Load weights from tf checkpoint
load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(pytorch_dump_path)
# Verify that we can load the checkpoint.
model.from_pretrained(pytorch_dump_path)
print("""Done""")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
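# Example invocation (paths are illustrative):
# python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path /path/to/output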
| 127
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x , y , max_step ):
"""simple docstring"""
a = x
b = y
for step in range(max_step): # noqa: B007
a_new = a * a - b * b + x
b = 2 * a * b + y
a = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def get_black_and_white_rgb(distance ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def get_color_coded_rgb(distance ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance , 1 , 1))
def get_image(image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
"""simple docstring"""
img = Image.new("""RGB""" , (image_width, image_height))
pixels = img.load()
# loop through the image-coordinates
for image_x in range(image_width):
for image_y in range(image_height):
# determine the figure-coordinates based on the image-coordinates
figure_height = figure_width / image_width * image_height
figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
distance = get_distance(figure_x , figure_y , max_step)
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
pixels[image_x, image_y] = get_color_coded_rgb(distance)
else:
pixels[image_x, image_y] = get_black_and_white_rgb(distance)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
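# Hand-checkable escape-time values for get_distance above: the origin never
# diverges, so all max_step iterations run and get_distance(0, 0, 50) == 1.0,
# while the clearly divergent point (1, 1) escapes on the first step, so
# get_distance(1, 1, 50) == 0.0.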
| 127
| 1
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
"""simple docstring"""
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class TvltFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , spectrogram_length=2_0_4_8 , feature_size=1_2_8 , num_audio_channels=1 , hop_length=5_1_2 , chunk_length=3_0 , sampling_rate=4_4_1_0_0 , ):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.spectrogram_length = spectrogram_length
self.feature_size = feature_size
self.num_audio_channels = num_audio_channels
self.hop_length = hop_length
self.chunk_length = chunk_length
self.sampling_rate = sampling_rate
def prepare_feat_extract_dict( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
def _flatten(list_of_lists ):
return list(itertools.chain(*list_of_lists ) )
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
"""simple docstring"""
feature_extraction_class = TvltFeatureExtractor
def setUp( self ):
self.feat_extract_tester = TvltFeatureExtractionTester(self )
def test_feat_extract_properties( self ):
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(feature_extractor , "spectrogram_length" ) )
self.assertTrue(hasattr(feature_extractor , "feature_size" ) )
self.assertTrue(hasattr(feature_extractor , "num_audio_channels" ) )
self.assertTrue(hasattr(feature_extractor , "hop_length" ) )
self.assertTrue(hasattr(feature_extractor , "chunk_length" ) )
self.assertTrue(hasattr(feature_extractor , "sampling_rate" ) )
def test_feat_extract_from_and_save_pretrained( self ):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
check_json_file_has_correct_format(saved_file )
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_first = dict_first.pop("mel_filters" )
mel_second = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(mel_first , mel_second ) )
self.assertEqual(dict_first , dict_second )
def test_feat_extract_to_json_file( self ):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
feat_extract_first.to_json_file(json_file_path )
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_first = dict_first.pop("mel_filters" )
mel_second = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(mel_first , mel_second ) )
self.assertEqual(dict_first , dict_second )
def test_call( self ):
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
encoded_audios = feature_extractor(
np_speech_inputs , return_tensors="np" , sampling_rate=4_4_1_0_0 , mask_audio=True ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
np_speech_inputs = np.asarray(speech_inputs )
encoded_audios = feature_extractor(np_speech_inputs , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _load_datasamples( self , num_samples ):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_integration( self ):
input_speech = self._load_datasamples(1 )
feature_extractor = TvltFeatureExtractor()
audio_values = feature_extractor(input_speech , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4 ) )
| 575
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
"""simple docstring"""
config_cls = PegasusConfig
config_updates = {}
hidden_act = "gelu"
def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs( self ):
input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
input_ids = np.concatenate([input_ids, eos_tensor] , axis=1 )
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
max_decoder_length = 2_0
model = model_class_name(config )
encoder_outputs = model.encode(inputs_dict["input_ids"] )
decoder_input_ids , decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
outputs = model.decode(decoder_input_ids , encoder_outputs )
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
max_decoder_length = 2_0
model = model_class_name(config )
encoder_outputs = model.encode(inputs_dict["input_ids"] )
decoder_input_ids , decoder_attention_mask = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
decoder_attention_mask_cache = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
decoder_position_ids = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
outputs_cache = model.decode(
decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
outputs_cache_next = model.decode(
decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ):
"""simple docstring"""
if attention_mask is None:
attention_mask = np.not_equal(input_ids, config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
decoder_attention_mask = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.int8 ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
is_encoder_decoder = True
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp( self ):
self.model_tester = FlaxPegasusModelTester(self )
self.config_tester = ConfigTester(self , config_class=PegasusConfig )
def test_config( self ):
self.config_tester.run_common_tests()
def test_use_cache_forward( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
def test_use_cache_forward_with_attn_mask( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def test_encode( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
@jax.jit
def encode_jitted(input_ids , attention_mask=None , **kwargs ):
return model.encode(input_ids=input_ids , attention_mask=attention_mask )
with self.subTest("JIT Enabled" ):
jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
def test_decode( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
model = model_class(config )
encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
prepared_inputs_dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
return model.decode(
decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
with self.subTest("JIT Enabled" ):
jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def test_model_from_pretrained( self ):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("google/pegasus-large" , from_pt=True )
input_ids = np.ones((1, 1) )
outputs = model(input_ids )
self.assertIsNotNone(outputs )
@slow
def test_pegasus_xsum_summary( self ):
model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
inputs = tokenizer(src_text , return_tensors="np" , truncation=True , max_length=5_1_2 , padding=True )
translated_tokens = model.generate(**inputs , num_beams=2 ).sequences
decoded = tokenizer.batch_decode(translated_tokens , skip_special_tokens=True )
assert tgt_text == decoded
| 575
| 1
|
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_serve_dependencies_installed = True
except (ImportError, AttributeError):
BaseModel = object
def Body(*x , **y ):
pass
_serve_dependencies_installed = False
logger = logging.get_logger("""transformers-cli/serving""")
def serve_command_factory(args: Namespace ) -> "ServeCommand":
nlp = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(nlp , args.host , args.port , args.workers )
class ServeModelInfoResult( BaseModel ):
"""simple docstring"""
infos: dict
class ServeTokenizeResult( BaseModel ):
"""simple docstring"""
tokens: List[str]
tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult( BaseModel ):
"""simple docstring"""
text: str
class ServeForwardResult( BaseModel ):
"""simple docstring"""
output: Any
class ServeCommand( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def register_subcommand( parser: ArgumentParser ):
serve_parser = parser.add_parser(
"serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task" , type=str , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
serve_parser.add_argument("--host" , type=str , default="localhost" , help="Interface the server will listen on." )
serve_parser.add_argument("--port" , type=int , default=8_888 , help="Port the serving will listen to." )
serve_parser.add_argument("--workers" , type=int , default=1 , help="Number of http workers" )
serve_parser.add_argument("--model" , type=str , help="Model's name or path to stored model." )
serve_parser.add_argument("--config" , type=str , help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer" , type=str , help="Tokenizer name to use." )
serve_parser.add_argument(
"--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
serve_parser.set_defaults(func=serve_command_factory )
def __init__( self , pipeline: Pipeline , host: str , port: int , workers: int ):
self._pipeline = pipeline
self.host = host
self.port = port
self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f'''Serving model over {host}:{port}''' )
self._app = FastAPI(
routes=[
APIRoute(
"/" , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=["GET"] , ),
APIRoute(
"/tokenize" , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=["POST"] , ),
APIRoute(
"/detokenize" , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=["POST"] , ),
APIRoute(
"/forward" , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=["POST"] , ),
] , timeout=600 , )
def run( self ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def model_info( self ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def tokenize( self , text_input: str = Body(None , embed=True ) , return_ids: bool = Body(False , embed=True ) ):
try:
tokens_txt = self._pipeline.tokenizer.tokenize(text_input )
if return_ids:
tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt )
return ServeTokenizeResult(tokens=tokens_txt , tokens_ids=tokens_ids )
else:
return ServeTokenizeResult(tokens=tokens_txt )
except Exception as e:
raise HTTPException(status_code=500 , detail={"model": "", "error": str(e )} )
def detokenize( self , tokens_ids: List[int] = Body(None , embed=True ) , skip_special_tokens: bool = Body(False , embed=True ) , cleanup_tokenization_spaces: bool = Body(True , embed=True ) , ):
try:
decoded_str = self._pipeline.tokenizer.decode(tokens_ids , skip_special_tokens , cleanup_tokenization_spaces )
return ServeDeTokenizeResult(model="" , text=decoded_str )
except Exception as e:
raise HTTPException(status_code=500 , detail={"model": "", "error": str(e )} )
async def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[int]=Body(lowerCamelCase__ , embed=lowerCamelCase__ ) ):
# Check we don't have empty string
if len(lowerCamelCase__ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
a__ : Any = self._pipeline(lowerCamelCase__ )
return ServeForwardResult(output=lowerCamelCase__ )
except Exception as e:
raise HTTPException(500 , {"error": str(lowerCamelCase__ )} )
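# Hedged usage sketch (not part of the source): assuming the server above was started with, e.g.,
#   transformers-cli serve --task feature-extraction --model bert-base-cased --port 8888
# the /tokenize route can be exercised as below; the JSON field names follow the
# Body(..., embed=True) parameters in the handler signatures above.
import requests

response = requests.post(
    "http://localhost:8888/tokenize",
    json={"text_input": "Hello world", "return_ids": True},
)
print(response.json())  # expected keys: "tokens" and, because return_ids=True, "tokens_ids"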
| 151
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    """This is a general feature extraction class for speech recognition."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
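# Hedged usage sketch (not part of the source): `pad` is normally reached through a concrete
# subclass; Wav2Vec2FeatureExtractor is assumed here as an example whose main input name is
# "input_values". Ragged inputs are right-padded to the longest sequence in the batch.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
features = [{"input_values": np.random.randn(800)}, {"input_values": np.random.randn(1200)}]
batch = extractor.pad(features, padding=True, return_attention_mask=True, return_tensors="np")
print(batch["input_values"].shape)           # (2, 1200)
print(batch["attention_mask"].sum(axis=-1))  # [ 800 1200]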
| 151
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 487
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first ``length`` hexagonal numbers, h_n = n * (2 * n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
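# Expected output of the demo above (hexagonal numbers h_n = n * (2n - 1), starting at n = 0):
#   [0, 1, 6, 15, 28]
#   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]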
| 487
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
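# Hedged usage sketch (checkpoint name assumed, not taken from this file): when the optional
# dependencies above are available, the real ShapEPipeline is exported and can be used directly.
import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64, frame_size=256).images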
| 183
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
@staticmethod
@abstractmethod
def a_ ( lowercase_ ) -> Optional[Any]:
raise NotImplementedError()
@abstractmethod
def a_ ( self ) -> Any:
raise NotImplementedError()
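# Illustrative sketch (the subclass name and behaviour are invented here, not from the source):
# a concrete command implements the two abstract hooks above. `register_subcommand` wires the
# command into the CLI's subparser tree, and `run` does the actual work.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from a transformers CLI command")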
| 183
| 1
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
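# In a transformers checkout, the suite above would typically be run with pytest, e.g.:
#   pytest tests/models/trocr/test_modeling_trocr.py -q
# (the path is assumed from the repository layout; adjust it if the file lives elsewhere)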
| 307
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
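# Hedged usage sketch (checkpoint name taken from the vocab map above; availability assumed):
from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tokenizer("Hello world").input_ids
print(tokenizer.decode(ids))  # "Hello world"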
| 307
| 1
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map every lowercase letter to its uppercase counterpart: {'a': 'A', 'b': 'B', ...}
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 711
|
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n_str = str(n)
    return n_str == n_str[::-1]


def solution(limit: int = 1000000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
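# Sanity check: 585 is palindromic in base 10 and in base 2 (bin(585) == "0b1001001001"),
# so it is counted; solution(1000000) evaluates to 872187, the Project Euler 36 answer.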
| 488
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
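# Hedged usage sketch (checkpoint name assumed, not taken from this file):
import requests
from PIL import Image
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text=["a photo of two cats"], return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # e.g. ['attention_mask', 'input_ids', 'pixel_values', ...]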
| 517
|
'''simple docstring'''
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset,
        )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True,
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
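# Hypothetical invocation (checkpoint and output paths are placeholders, not from this file):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./stable_diffusion_onnx \
#       --opset 14 \
#       --fp16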
| 517
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
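# Hedged usage sketch (checkpoint name taken from the vocab map above; availability assumed):
from transformers import PegasusTokenizer

tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tokenizer("PEGASUS was pretrained with gap sentences.").input_ids
print(ids[-1] == tokenizer.eos_token_id)  # True: build_inputs_with_special_tokens appends </s> (id 1)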
| 707
|
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)

    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 214
| 0
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
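
# Hedged usage sketch (not part of the original file; all variable names below are
# placeholders for objects built elsewhere in the legacy seq2seq example scripts):
#
#   trainer = Seq2SeqTrainer(
#       model=model,
#       args=training_args,
#       data_args=data_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       tokenizer=tokenizer,
#   )
#   trainer.train()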
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
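
# Launch sketch (assumption, not part of the original file): the script reads RANK and
# WORLD_SIZE from the environment, so it is meant to be started by a distributed launcher
# that sets them, e.g.:
#
#   torchrun --nproc_per_node=2 this_script.py --streaming ""
#
# Note that `--streaming` is declared with `type=bool`, so only the empty string parses as
# False; any non-empty value enables streaming. Each worker then verifies it received its
# expected share of the NUM_SHARDS * NUM_ITEMS_PER_SHARD examples.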
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( __SCREAMING_SNAKE_CASE ):
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Tuple:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCamelCase__( enum.Enum ):
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Optional[int] = 1
lowerCAmelCase__ : List[Any] = 2
@add_end_docstrings(__A )
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Optional[Any] = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
super().__init__(*__UpperCAmelCase ,**__UpperCAmelCase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
A__ = None
if self.model.config.prefix is not None:
A__ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
A__ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
A__ , A__ , A__ = self._sanitize_parameters(prefix=__UpperCAmelCase ,**self._forward_params )
A__ = {**self._preprocess_params, **preprocess_params}
A__ = {**self._forward_params, **forward_params}
def snake_case__ ( self ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Dict:
A__ = {}
if prefix is not None:
A__ = prefix
if prefix:
A__ = self.tokenizer(
__UpperCAmelCase ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework )
A__ = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']' )
A__ = handle_long_generation
preprocess_params.update(__UpperCAmelCase )
A__ = generate_kwargs
A__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
A__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
A__ = ReturnType.TENSORS
if return_type is not None:
A__ = return_type
if clean_up_tokenization_spaces is not None:
A__ = clean_up_tokenization_spaces
if stop_sequence is not None:
A__ = self.tokenizer.encode(__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
A__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case__ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*__UpperCAmelCase ,**__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Dict:
return super().__call__(__UpperCAmelCase ,**__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase="" ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Dict:
A__ = self.tokenizer(
prefix + prompt_text ,padding=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors=self.framework )
A__ = prompt_text
if handle_long_generation == "hole":
A__ = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
A__ = generate_kwargs['max_new_tokens']
else:
A__ = generate_kwargs.get('max_length' ,self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
A__ = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
A__ = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
A__ = inputs['attention_mask'][:, -keep_length:]
return inputs
def snake_case__ ( self ,__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
A__ = model_inputs['input_ids']
A__ = model_inputs.get('attention_mask' ,__UpperCAmelCase )
# Allow empty prompts
if input_ids.shape[1] == 0:
A__ = None
A__ = None
A__ = 1
else:
A__ = input_ids.shape[0]
A__ = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
A__ = generate_kwargs.pop('prefix_length' ,0 )
if prefix_length > 0:
A__ = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
A__ = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
A__ = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
A__ = self.model.generate(input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase ,**__UpperCAmelCase )
A__ = generated_sequence.shape[0]
if self.framework == "pt":
A__ = generated_sequence.reshape(__UpperCAmelCase ,out_b // in_b ,*generated_sequence.shape[1:] )
elif self.framework == "tf":
A__ = tf.reshape(__UpperCAmelCase ,(in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=ReturnType.FULL_TEXT ,__UpperCAmelCase=True ) -> str:
A__ = model_outputs['generated_sequence'][0]
A__ = model_outputs['input_ids']
A__ = model_outputs['prompt_text']
A__ = generated_sequence.numpy().tolist()
A__ = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
A__ = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
A__ = self.tokenizer.decode(
__UpperCAmelCase ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
A__ = 0
else:
A__ = len(
self.tokenizer.decode(
input_ids[0] ,skip_special_tokens=__UpperCAmelCase ,clean_up_tokenization_spaces=__UpperCAmelCase ,) )
if return_type == ReturnType.FULL_TEXT:
A__ = prompt_text + text[prompt_length:]
else:
A__ = text[prompt_length:]
A__ = {'generated_text': all_text}
records.append(__UpperCAmelCase )
return records
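
# Hedged usage sketch (model name is illustrative): the pipeline is normally obtained via the
# `pipeline()` factory rather than instantiated directly.
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
#
# `return_full_text` / `return_text` / `return_tensors` are mapped onto the `ReturnType`
# enum by `_sanitize_parameters` above.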
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
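
# CLI sketch (hedged): this module backs the `accelerate test` subcommand, so the test can be
# run either through the CLI or by executing the module directly (config path illustrative):
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#   python -m accelerate.commands.test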
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
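
# Worked example (not in the original file): "kitten" -> "sitting" needs 3 edits
# (substitute 'k'->'s', substitute 'e'->'i', insert 'g'), so
# min_distance_up_bottom("kitten", "sitting") == 3. Thanks to @functools.cache the
# recursion is memoized, giving the usual O(len(word1) * len(word2)) DP cost.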
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""image_processor""", """tokenizer"""]
lowerCamelCase__ = """Pix2StructImageProcessor"""
lowerCamelCase__ = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = False
super().__init__(lowercase , lowercase )
def __call__( self , lowercase=None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 2048 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_lowerCamelCase : Optional[Any] = self.tokenizer
_lowerCamelCase : Tuple = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_lowerCamelCase : Union[str, Any] = self.image_processor(
lowercase , return_tensors=lowercase , max_patches=lowercase , **lowercase )
else:
# add pixel_values and bbox
_lowerCamelCase : Optional[Any] = self.image_processor(
lowercase , return_tensors=lowercase , max_patches=lowercase , header_text=lowercase , **lowercase )
if text is not None and not self.image_processor.is_vqa:
_lowerCamelCase : Optional[Any] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
if "attention_mask" in text_encoding:
_lowerCamelCase : List[Any] = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_lowerCamelCase : str = text_encoding.pop('input_ids' )
else:
_lowerCamelCase : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A_ ( self ):
_lowerCamelCase : Dict = self.tokenizer.model_input_names
_lowerCamelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
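
# Hedged usage sketch (names illustrative): for non-VQA checkpoints the processor routes
# `images` to the image processor and `text` to the tokenizer, renaming the text keys to
# `decoder_input_ids` / `decoder_attention_mask` as done in `__call__` above:
#
#   inputs = processor(images=image, text="A caption", return_tensors="pt")
#   # -> keys such as "flattened_patches", "attention_mask", "decoder_input_ids"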
"""simple docstring"""
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = abs(lowercase__ )
_lowerCamelCase : Optional[int] = 0
while n > 0:
res += n % 10
n //= 10
return res
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[str] = abs(lowercase__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def _snake_case ( lowercase__ ):
return sum(int(lowercase__ ) for c in str(abs(lowercase__ ) ) )
def _snake_case ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowercase__ , lowercase__ ) -> None:
_lowerCamelCase : int = f'''{func.__name__}({value})'''
_lowerCamelCase : Optional[Any] = timeit(f'''__main__.{call}''' , setup='import __main__' )
print(f'''{call:56} = {func(lowercase__ )} -- {timing:.4f} seconds''' )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(lowercase__ , lowercase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
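
# Worked example (not in the original file): 262144 has digit sum 2+6+2+1+4+4 = 19, and all
# three implementations return 19 for it; the benchmark above only compares their speed.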
"""simple docstring"""
from math import sqrt
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> bool:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
lowercase__: Optional[Any] = True
# 0 and 1 are none primes.
if number <= 1:
lowercase__: str = False
for divisor in range(2 , int(round(sqrt(__UpperCAmelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
lowercase__: Optional[int] = False
break
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'status' must been from type bool"
return status
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
lowercase__: Optional[Any] = list(range(2 , n + 1 ) )
lowercase__: int = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__UpperCAmelCase ) ):
for j in range(i + 1 , len(__UpperCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
lowercase__: Union[str, Any] = 0
# filters actual prime numbers.
lowercase__: List[Any] = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type list"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
lowercase__: Tuple = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__UpperCAmelCase ):
ans.append(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type list"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and number >= 0, "'number' must been an int and >= 0"
lowercase__: str = [] # this list will be returns of the function.
# potential prime number factors.
lowercase__: str = 2
lowercase__: Optional[Any] = number
if number == 0 or number == 1:
ans.append(__UpperCAmelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__UpperCAmelCase ):
while quotient != 1:
if is_prime(__UpperCAmelCase ) and (quotient % factor == 0):
ans.append(__UpperCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type list"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__: str = 0
# prime factorization of 'number'
lowercase__: Dict = prime_factorization(__UpperCAmelCase )
lowercase__: int = max(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type int"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase__: Optional[Any] = 0
# prime factorization of 'number'
lowercase__: Any = prime_factorization(__UpperCAmelCase )
lowercase__: List[Any] = min(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'ans' must been from type int"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , __UpperCAmelCase ), "compare bust been from type bool"
return number % 2 == 0
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , __UpperCAmelCase ), "compare bust been from type bool"
return number % 2 != 0
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]:
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (number > 2) and is_even(__UpperCAmelCase )
), "'number' must been an int, even and > 2"
lowercase__: List[Any] = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
lowercase__: int = get_prime_numbers(__UpperCAmelCase )
lowercase__: Dict = len(__UpperCAmelCase )
# run variable for while-loops.
lowercase__: Optional[Any] = 0
lowercase__: Any = None
# exit variable. for break up the loops
lowercase__: Tuple = True
while i < len_pn and loop:
lowercase__: Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase__: List[str] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (len(__UpperCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase__: List[Any] = 0
while numbera != 0:
lowercase__: List[Any] = numbera % numbera
lowercase__: str = numbera
lowercase__: Optional[int] = rest
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase__: str = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase__: Union[str, Any] = prime_factorization(__UpperCAmelCase )
lowercase__: List[Any] = prime_factorization(__UpperCAmelCase )
elif numbera == 1 or numbera == 1:
lowercase__: Union[str, Any] = []
lowercase__: Dict = []
lowercase__: Any = max(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[int] = 0
lowercase__: List[str] = 0
lowercase__: Tuple = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase__: int = prime_fac_a.count(__UpperCAmelCase )
lowercase__: Dict = prime_fac_a.count(__UpperCAmelCase )
for _ in range(max(__UpperCAmelCase , __UpperCAmelCase ) ):
ans *= n
else:
lowercase__: Any = prime_fac_a.count(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
ans *= n
done.append(__UpperCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase__: Union[str, Any] = prime_fac_a.count(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
ans *= n
done.append(__UpperCAmelCase )
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 0), "'number' must been a positive int"
lowercase__: Optional[Any] = 0
lowercase__: Tuple = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__UpperCAmelCase ):
ans += 1
# precondition
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and is_prime(
__UpperCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
assert (
is_prime(__UpperCAmelCase ) and is_prime(__UpperCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase__: Optional[Any] = p_number_a + 1 # jump to the next number
lowercase__: Union[str, Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__UpperCAmelCase ):
number += 1
while number < p_number_a:
ans.append(__UpperCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(__UpperCAmelCase ):
number += 1
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and ans[0] != p_number_a
and ans[len(__UpperCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
lowercase__: Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__UpperCAmelCase )
# precondition
assert ans[0] == 1 and ans[len(__UpperCAmelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
lowercase__: Any = get_divisors(__UpperCAmelCase )
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (divisors[0] == 1)
and (divisors[len(__UpperCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase__: Dict = gcd(abs(__UpperCAmelCase ) , abs(__UpperCAmelCase ) )
# precondition
assert (
isinstance(__UpperCAmelCase , __UpperCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
lowercase__: Dict = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
lowercase__: Tuple = 0
lowercase__: Tuple = 1
lowercase__: List[str] = 1 # this will be return
for _ in range(n - 1 ):
lowercase__: int = ans
ans += fiba
lowercase__: Optional[int] = tmp
return ans
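
# Worked examples (not in the original file):
#   gcd(24, 36) == 12
#   kg_v(8, 10) == 40          # lcm via prime factorizations: 2*2*2 * 5
#   goldbach(28) == [5, 23]    # first prime pair found that sums to 28
#   simplify_fraction(6, 8) == (3, 4)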
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["DeiTFeatureExtractor"]
__A = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
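
# Note: at runtime the `_LazyModule` registered in `sys.modules` defers the heavy imports,
# so e.g. `from transformers.models.deit import DeiTModel` only imports `modeling_deit`
# (and hence torch) on first attribute access; the eager imports under TYPE_CHECKING exist
# purely for static type checkers.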
import unittest

from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
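
# Hedged subclass sketch (names illustrative, not a real command implementation):
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello from a transformers CLI subcommand")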