from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available

if is_fairscale_available():
    from fairscale.optim import OSS

logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    """Trainer for sequence-to-sequence models, with custom loss, optimizer and scheduler handling."""

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Set up the optimizer and the learning rate scheduler."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
"""simple docstring"""
import sys
lowerCAmelCase__ = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")


from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    """Configuration class storing the hyperparameters of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
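

# Illustrative usage (a sketch, assuming the standard transformers package layout):
#
#     from transformers import BioGptConfig, BioGptModel
#
#     config = BioGptConfig()       # default microsoft/biogpt-style hyperparameters
#     model = BioGptModel(config)   # randomly initialised model built from that config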


from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of a*x^2 + b*x + c = 0 via the quadratic formula."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
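
# Illustrative calls (not in the original file):
#     quadratic_roots(1, -2, 1)  -> (1.0, 1.0)    discriminant 0, real double root
#     quadratic_roots(1, 0, 1)   -> (1j, -1j)     negative discriminant, complex pair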


import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_detr_config(model_name):
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-101")
    else:
        raise ValueError("Model name should include either resnet50 or resnet101")

    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config)

    # set label attributes
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config, is_panoptic


def create_rename_keys(config):
    # here we list all keys to be renamed (original name on the left, our name on the right)
    rename_keys = []

    # stem
    # fmt: off
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight"))
    rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias"))
    rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean"))
    rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var"))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
                    )
                )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
                    )
                )
                rename_keys.append(
                    (
                        f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
                        f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
                f"encoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
                f"decoder.layers.{i}.self_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
                f"decoder.layers.{i}.encoder_attn.out_proj.weight",
            )
        )
        rename_keys.append(
            (
                f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
                f"decoder.layers.{i}.encoder_attn.out_proj.bias",
            )
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
        rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
        )
        rename_keys.append(
            (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
        )
        rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ("input_proj.weight", "input_projection.weight"),
            ("input_proj.bias", "input_projection.bias"),
            ("query_embed.weight", "query_position_embeddings.weight"),
            ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
            ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
            ("class_embed.weight", "class_labels_classifier.weight"),
            ("class_embed.bias", "class_labels_classifier.bias"),
            ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
            ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
            ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
            ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
            ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
            ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ]
    )

    return rename_keys


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr.model" + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
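
# Example invocation (illustrative; assumes the script is saved as convert_detr_to_pytorch.py
# and the output directory is hypothetical):
#
#     python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#         --pytorch_dump_folder_path ./detr-resnet-50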


def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways to make x pence using standard UK coins (Project Euler Problem 31)."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
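
# Illustrative values, checked by hand for the smallest cases (not in the original file):
#     solution(2) == 2   # 2p as one 2p coin, or two 1p coins
#     solution(5) == 4   # 5p as 5p; 2+2+1; 2+1+1+1; 1+1+1+1+1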
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist

def printflock(*msgs):
    """Solves the multi-process interleaved print problem."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Optional[int] ) -> List[str]:
__snake_case = tempfile.mkdtemp()
# fmt: off
__snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__snake_case = dict(zip(A_ , range(len(A_ ) ) ) )
__snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A_ ) )
__snake_case = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__snake_case = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A_ , A_ )
def lowercase ( self : Optional[Any] , **A_ : Dict ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Optional[int] , **A_ : str ) -> str:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Any , **A_ : Tuple ) -> Tuple:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase ( self : int ) -> Optional[Any]:
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = self.get_image_processor()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
__snake_case = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
__snake_case = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def lowercase ( self : Union[str, Any] ) -> Any:
__snake_case = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
__snake_case = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def lowercase ( self : Any ) -> str:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(A_ , return_tensors='''np''' )
__snake_case = processor(images=A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase ( self : List[str] ) -> List[Any]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = '''lower newer'''
__snake_case = processor(text=A_ )
__snake_case = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : List[Any] ) -> str:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = '''lower newer'''
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowercase ( self : Union[str, Any] ) -> Any:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = self.prepare_image_inputs()
__snake_case = self.prepare_image_inputs()
__snake_case = processor(images=A_ , visual_prompt=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowercase ( self : Optional[int] ) -> Dict:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case = processor.batch_decode(A_ )
__snake_case = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
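
# To run just this test class (command and path are illustrative, following the
# usual transformers repository layout):
#
#     pytest tests/models/clipseg/test_processor_clipseg.py -v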


from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
"""simple docstring"""
import numpy
# List of input, output pairs
SCREAMING_SNAKE_CASE__ : Optional[Any] =(
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
SCREAMING_SNAKE_CASE__ : str =(((515, 22, 13), 555), ((61, 35, 49), 150))
SCREAMING_SNAKE_CASE__ : int =[2, 4, 1, 5]
SCREAMING_SNAKE_CASE__ : Any =len(train_data)
SCREAMING_SNAKE_CASE__ : List[Any] =0.009
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="train" ) ->List[str]:
return calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) - output(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Tuple:
_lowerCamelCase : int = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=m ) ->List[str]:
_lowerCamelCase : Tuple = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if index == -1:
summation_value += _error(SCREAMING_SNAKE_CASE_ )
else:
summation_value += _error(SCREAMING_SNAKE_CASE_ ) * train_data[i][0][index]
return summation_value
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->List[str]:
_lowerCamelCase : Optional[Any] = summation_of_cost_derivative(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) / m
return cost_derivative_value
def UpperCamelCase ( ) ->Optional[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
_lowerCamelCase : Dict = 0.000002
_lowerCamelCase : List[str] = 0
_lowerCamelCase : Union[str, Any] = 0
while True:
j += 1
_lowerCamelCase : str = [0, 0, 0, 0]
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
_lowerCamelCase : Optional[int] = get_cost_derivative(i - 1 )
_lowerCamelCase : Optional[Any] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ , rtol=SCREAMING_SNAKE_CASE_ , ):
break
_lowerCamelCase : List[str] = temp_parameter_vector
print(('''Number of iterations:''', j) )
def UpperCamelCase ( ) ->Optional[Any]:
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
print(('''Actual output value:''', output(SCREAMING_SNAKE_CASE_ , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(SCREAMING_SNAKE_CASE_ , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
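
# Worked example (using the constants above, before any training): with
# parameter_vector [p0, p1, p2, p3] = [2, 4, 1, 5], the first training example
# ((5, 2, 3), 15) gives h(x) = 2 + 4*5 + 1*2 + 5*3 = 39, so _error(0) = 39 - 15 = 24.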
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache a prompt from a repo, or return the argument unchanged if it is already a prompt."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
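
# Illustrative call (the agent name is hypothetical):
#
#     prompt = download_prompt(None, agent_name="my-agent", mode="run")
#     # fetches run_prompt_template.txt from the huggingface-tools/default-prompts dataset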


from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
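
# Illustrative usage (the image path is hypothetical; calling the processor
# dispatches to preprocess via BaseImageProcessor.__call__):
#
#     from PIL import Image
#
#     processor = GLPNImageProcessor()
#     image = Image.open("example.jpg")
#     batch = processor(images=image, return_tensors="pt")
#     # batch["pixel_values"] has height/width rounded down to multiples of 32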
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=7 , __lowerCAmelCase : int=3 , __lowerCAmelCase : int=18 , __lowerCAmelCase : Tuple=30 , __lowerCAmelCase : List[str]=4_00 , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Dict=[0.5, 0.5, 0.5] , __lowerCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , __lowerCAmelCase : Tuple=False , ) -> List[str]:
_A = size if size is not None else {'''height''': 20, '''width''': 20}
_A = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_A = parent
_A = batch_size
_A = num_channels
_A = image_size
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_center_crop
_A = crop_size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_reduce_labels
def snake_case_ ( self : Optional[int] ) -> Tuple:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
_A = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_A = Image.open(dataset[0]['''file'''] )
_A = Image.open(dataset[1]['''file'''] )
return image, map
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_A = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_A = Image.open(ds[0]['''file'''] )
_A = Image.open(ds[1]['''file'''] )
_A = Image.open(ds[2]['''file'''] )
_A = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowerCamelCase__ ( _A , unittest.TestCase):
"""simple docstring"""
a__ : List[Any] = BeitImageProcessor if is_vision_available() else None
def snake_case_ ( self : Optional[Any] ) -> Optional[Any]:
_A = BeitImageProcessingTester(self )
@property
def snake_case_ ( self : Dict ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ ( self : int ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''center_crop''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__lowerCAmelCase , '''image_std''' ) )
def snake_case_ ( self : int ) -> List[str]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCAmelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCAmelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , __lowerCAmelCase )
def snake_case_ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def snake_case_ ( self : Union[str, Any] ) -> int:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_A = image_processing(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case_ ( self : Optional[Any] ) -> Any:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 1_50)
        # with reduce_labels, the background becomes 255 (the ignore index)
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55)
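# For reference, a minimal sketch of the reduce-labels remapping exercised by
# test_reduce_labels above (my paraphrase of the documented BEiT behaviour, not the
# library code): background id 0 becomes the ignore index 255 and every other class
# id shifts down by one, which is why the remapped labels stay within [0, 255].
def _reduce_labels_sketch(segmentation_map):
    seg = np.array(segmentation_map).copy()
    seg[seg == 0] = 2_55   # background -> ignore index
    seg = seg - 1          # shift the remaining class ids down by one
    seg[seg == 2_54] = 2_55
    return seg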
| 2
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_glpn'] = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
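# Note on the pattern above: `_import_structure` maps each submodule to its public
# names, and replacing `sys.modules[__name__]` with a `_LazyModule` defers the heavy
# torch/vision imports until an attribute is first accessed. A stripped-down sketch
# of the same idea via PEP 562 module-level __getattr__ (illustrative only, not the
# actual _LazyModule implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{submodule}", __name__), name)
#       raise AttributeError(name)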
| 281
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ibert"""] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
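# Consumer-side effect of the lazy structure above (a hedged sketch; the checkpoint
# name is illustrative, not taken from this file): importing the class is what
# finally pulls in the torch-backed modeling file, so `import transformers` stays cheap.
#
#   from transformers import IBertForMaskedLM      # modeling_ibert imported here
#   model = IBertForMaskedLM.from_pretrained("kssteven/ibert-roberta-base")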
| 49
|
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
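# Worked example (added for illustration, not part of the original module): in the
# classic 3x3 grid the cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1 = 7.
if __name__ == "__main__":
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7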
| 49
| 1
|
def mf_knapsack(i, wt, val, j):
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int, wt: list, val: list):
    '''simple docstring'''
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            """Both the weights and values vectors must be either lists or tuples""")
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            """The number of weights must be the same as the number of values.\n"""
            F'But got {num_items} weights and {len(val)} values'
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                """All weights must be integers but got weight of """
                F'type {type(wt[i])} at index {i}'
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
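# An equivalent memoised formulation (my sketch, not from the original file): it
# avoids the module-level table `f` that mf_knapsack above relies on by letting
# functools.lru_cache key on (i, j) while wt/val are closed over.
def knapsack_cached(wt: list, val: list, i: int, j: int) -> int:
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def best(i: int, j: int) -> int:
        if i == 0 or j <= 0:
            return 0
        if wt[i - 1] > j:
            return best(i - 1, j)
        return max(best(i - 1, j), best(i - 1, j - wt[i - 1]) + val[i - 1])

    return best(i, j)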
if __name__ == "__main__":
lowerCamelCase__ : Dict = [3, 2, 4, 4]
lowerCamelCase__ : List[Any] = [4, 3, 2, 3]
lowerCamelCase__ : Optional[int] = 4
lowerCamelCase__ : Dict = 6
lowerCamelCase__ : Optional[int] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowerCamelCase__ , lowerCamelCase__ : int = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("""optimal_value = """, optimal_solution)
print("""An optimal subset corresponding to the optimal value""", optimal_subset)
| 12
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase__ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
lowerCamelCase__ : Optional[Any] = "CompVis/stable-diffusion-v1-2"
lowerCamelCase__ : Dict = "CompVis/stable-diffusion-v1-3"
lowerCamelCase__ : List[str] = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    '''simple docstring'''
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ):
        '''simple docstring'''
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        '''simple docstring'''
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith('''_''')}
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        '''simple docstring'''
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 5_12, width: int = 5_12, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        '''simple docstring'''
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 5_12, width: int = 5_12, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        '''simple docstring'''
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 5_12, width: int = 5_12, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        '''simple docstring'''
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 5_12, width: int = 5_12, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        '''simple docstring'''
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 5_12, width: int = 5_12, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        '''simple docstring'''
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
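# Hedged usage sketch (the loading call is an assumption based on how diffusers
# community pipelines are typically instantiated; it is not shown in this file):
#
#   pipe = StableDiffusionComparisonPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   # output.images holds one image per checkpoint: v1.1, v1.2, v1.3 and v1.4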
| 698
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class snake_case ( unittest.TestCase ):
    def test_flatten_dict(self):
        input_dict = {
            '''task_specific_params''': {
                '''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
                '''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
                '''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
            }
        }
        expected_dict = {
            '''task_specific_params.summarization.length_penalty''': 1.0,
            '''task_specific_params.summarization.max_length''': 1_2_8,
            '''task_specific_params.summarization.min_length''': 1_2,
            '''task_specific_params.summarization.num_beams''': 4,
            '''task_specific_params.summarization_cnn.length_penalty''': 2.0,
            '''task_specific_params.summarization_cnn.max_length''': 1_4_2,
            '''task_specific_params.summarization_cnn.min_length''': 5_6,
            '''task_specific_params.summarization_cnn.num_beams''': 4,
            '''task_specific_params.summarization_xsum.length_penalty''': 1.0,
            '''task_specific_params.summarization_xsum.max_length''': 6_2,
            '''task_specific_params.summarization_xsum.min_length''': 1_1,
            '''task_specific_params.summarization_xsum.num_beams''': 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (1_2, 5)), np.reshape(x, (1_2, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (1_2, 5)), reshape(t, (1_2, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (1_2, 5)), reshape(t, (1_2, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (1_2, 5)), np.asarray(reshape(t, (1_2, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
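# For context, a simplified sketch of how helpers like `transpose` dispatch on the
# framework of their argument (written for this document as an illustration; the
# real transformers implementation also covers tf and jax):
def _transpose_sketch(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    if is_torch_available() and isinstance(array, torch.Tensor):
        return array.T if axes is None else array.permute(*axes)
    raise ValueError(f"Type not supported for transpose: {type(array)}.")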
| 712
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
    )
default_cache_path = os.path.join(torch_cache_home, '''transformers''')
CLOUDFRONT_DISTRIB_PREFIX = '''https://cdn.huggingface.co'''
S3_BUCKET_PREFIX = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
PATH = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
CONFIG = os.path.join(PATH, '''config.yaml''')
ATTRIBUTES = os.path.join(PATH, '''attributes.txt''')
OBJECTS = os.path.join(PATH, '''objects.txt''')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = '''pytorch_model.bin'''
CONFIG_NAME = '''config.yaml'''
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    '''simple docstring'''
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(''',''')[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(''',''')[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    '''simple docstring'''
    r = OrderedDict()
    with open(ckp_path, '''rb''') as f:
        ckp = pkl.load(f)['''model''']
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
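# Hedged usage sketch (the file name is hypothetical): the OrderedDict returned
# above is an ordinary torch state dict, so a converted Detectron-style pickle can
# be loaded into a model directly:
#
#   state_dict = load_checkpoint("frcnn.pkl")
#   model.load_state_dict(state_dict, strict=False)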
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
def __repr__( self :List[str] ):
return str(list((self._pointer.keys()) ) )
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split('''.''')[-1]] = val
        levels = key.split('''.''')
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), '''.'''.join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer
    def dump_yaml(self, data, file_name):
        with open(f'''{file_name}''', '''w''') as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f'''{file_name}''', '''w''') as stream:
            json.dump(data, stream)
    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = '''    '''
        if self._name != "root":
            r = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ''''''
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(v).__name__})\n'''
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop('''cache_dir''', None)
        force_download = kwargs.pop('''force_download''', False)
        resume_download = kwargs.pop('''resume_download''', False)
        proxies = kwargs.pop('''proxies''', None)
        local_files_only = kwargs.pop('''local_files_only''', False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = '''Can\'t load config for'''
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print('''loading configuration file from path''')
        else:
            print('''loading configuration file cache''')
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    '''simple docstring'''
    out_tensor = torch.load('''dump.pt''', map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        F'''{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception('''tensors are all good''')
# Hugging face functions below
def is_remote_url(url_or_filename):
    '''simple docstring'''
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    '''simple docstring'''
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = '''/''' not in model_id
    if legacy_format:
        return F'''{endpoint}/{model_id}-{filename}'''
    else:
        return F'''{endpoint}/{model_id}/{filename}'''
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None, ):
    '''simple docstring'''
    ua = '''python/{}'''.format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join('''{}/{}'''.format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {'''user-agent''': ua}
    if resume_size > 0:
        headers['''Range'''] = '''bytes=%d-''' % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('''Content-Length''')
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit='''B''', unit_scale=True, total=total, initial=resume_size, desc='''Downloading''', )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
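# The resume logic above in short: the `Range: bytes=N-` header asks the server to
# start at byte N, and a 416 response means the local copy is already complete.
# Hedged example of resuming into an .incomplete file (the path is illustrative):
#
#   incomplete = "pytorch_model.bin.incomplete"
#   with open(incomplete, "a+b") as f:
#       http_get(url, f, resume_size=os.stat(incomplete).st_size)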
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get('''ETag''')
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + '''.*''')
                if not file.endswith('''.json''') and not file.endswith('''.lock''')
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        '''Cannot find the requested files in the cached path and outgoing traffic has been'''
                        ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
                        ''' to False.''')
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + '''.lock'''
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + '''.incomplete'''

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, '''a+b''') as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''', url, temp_file.name, )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )
        os.replace(temp_file.name, cache_path)
        meta = {'''url''': url, '''etag''': etag}
        meta_path = cache_path + '''.json'''
        with open(meta_path, '''w''') as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    '''simple docstring'''
    url_bytes = url.encode('''utf-8''')
    url_hash = shaaaa(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('''utf-8''')
        etag_hash = shaaaa(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith('''.h5'''):
        filename += ".h5"
    return filename
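# Worked example of the scheme above (digests abbreviated): the cache name is the
# hash of the URL plus, when an ETag is known, "." + hash of the ETag, so a new
# ETag for the same URL gets a fresh cache entry; the ".h5" suffix is preserved so
# downstream loaders can still sniff the format:
#
#   >>> url_to_filename("https://example.com/model.h5", etag='"abc"').endswith(".h5")
#   True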
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('''file {} not found'''.format(url_or_filename))
    else:
        # Something unknown
        raise ValueError('''unable to parse {} as a URL or as a local path'''.format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace('''.''', '''-''') + '''-extracted'''
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + '''.lock'''
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, '''r''') as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError('''Archive format of {} could not be identified'''.format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    '''simple docstring'''
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split('''\n''')
        req.close()
    return data
def get_image_from_url(url):
    '''simple docstring'''
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    '''simple docstring'''
    fn = url.split('''/''')[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, '''rb''') as stream:
        weights = pkl.load(stream)
    model = weights.pop('''model''')
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace('''running_var''', '''num_batches_tracked''')
            new[k2] = zero
    return new
def get_demo_path():
    '''simple docstring'''
    print(F'''{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb''')
def img_tensorize(im: str, input_format="RGB"):
    '''simple docstring'''
    assert isinstance(im, str), im
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, F'''could not connect to: {im}'''
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0, len(images), batch))
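# Example (mine, not from the original file): batching image paths for
# img_tensorize two at a time.
#
#   >>> list(chunk(["a.jpg", "b.jpg", "c.jpg"], batch=2))
#   [['a.jpg', 'b.jpg'], ['c.jpg']]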
| 401
| 0
|
"""simple docstring"""
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    '''simple docstring'''
    iam_client = botoa.client("""iam""")
    trust_policy = {
"""Version""": """2012-10-17""",
"""Statement""": [
{"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
],
}
try:
# create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(trust_policy, indent=2))
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=F'''{role_name}_policy_permission''', PolicyDocument=json.dumps(policy_document, indent=2), )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _get_iam_role_arn(role_name):
    '''simple docstring'''
    iam_client = botoa.client("""iam""")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = _ask_options(
"""How do you want to authorize?""" , ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """] , UpperCamelCase_ , )
UpperCAmelCase = None
if credentials_configuration == 0:
UpperCAmelCase = _ask_field("""Enter your AWS Profile name: [default] """ , default="""default""" )
UpperCAmelCase = aws_profile
else:
print(
"""Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
"""`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
UpperCAmelCase = _ask_field("""AWS Access Key ID: """ )
UpperCAmelCase = aws_access_key_id
UpperCAmelCase = _ask_field("""AWS Secret Access Key: """ )
UpperCAmelCase = aws_secret_access_key
UpperCAmelCase = _ask_field("""Enter your AWS Region: [us-east-1]""" , default="""us-east-1""" )
UpperCAmelCase = aws_region
UpperCAmelCase = _ask_options(
"""Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""" , ["""Provide IAM Role name""", """Create new IAM role using credentials"""] , UpperCamelCase_ , )
if role_management == 0:
UpperCAmelCase = _ask_field("""Enter your IAM role name: """ )
else:
UpperCAmelCase = """accelerate_sagemaker_execution_role"""
print(F'''Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials''' )
_create_iam_role_for_sagemaker(UpperCamelCase_ )
UpperCAmelCase = _ask_field(
"""Do you want to use custom Docker image? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase = None
if is_custom_docker_image:
UpperCAmelCase = _ask_field("""Enter your Docker image: """ , lambda lowerCAmelCase : str(UpperCamelCase_ ).lower() )
UpperCAmelCase = _ask_field(
"""Do you want to provide SageMaker input channels with data locations? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase = None
if is_sagemaker_inputs_enabled:
UpperCAmelCase = _ask_field(
"""Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """ , lambda lowerCAmelCase : str(UpperCamelCase_ ).lower() , )
UpperCAmelCase = _ask_field(
"""Do you want to enable SageMaker metrics? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase = None
if is_sagemaker_metrics_enabled:
UpperCAmelCase = _ask_field(
"""Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """ , lambda lowerCAmelCase : str(UpperCamelCase_ ).lower() , )
UpperCAmelCase = _ask_options(
"""What is the distributed mode?""" , ["""No distributed training""", """Data parallelism"""] , _convert_sagemaker_distributed_mode , )
UpperCAmelCase = {}
UpperCAmelCase = _ask_field(
"""Do you wish to optimize your script with torch dynamo?[yes/NO]:""" , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
if use_dynamo:
UpperCAmelCase = """dynamo_"""
UpperCAmelCase = _ask_options(
"""Which dynamo backend would you like to use?""" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
UpperCAmelCase = _ask_field(
"""Do you want to customize the defaults sent to torch.compile? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
if use_custom_options:
UpperCAmelCase = _ask_options(
"""Which mode do you want to use?""" , UpperCamelCase_ , lambda lowerCAmelCase : TORCH_DYNAMO_MODES[int(UpperCamelCase_ )] , default="""default""" , )
UpperCAmelCase = _ask_field(
"""Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase = _ask_field(
"""Do you want to enable dynamic shape tracing? [yes/NO]: """ , _convert_yes_no_to_bool , default=UpperCamelCase_ , error_message="""Please enter yes or no.""" , )
UpperCAmelCase = """Which EC2 instance type you want to use for your training?"""
if distributed_type != SageMakerDistributedType.NO:
UpperCAmelCase = _ask_options(
UpperCamelCase_ , UpperCamelCase_ , lambda lowerCAmelCase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCamelCase_ )] )
else:
eca_instance_query += " [ml.p3.2xlarge]:"
UpperCAmelCase = _ask_field(UpperCamelCase_ , lambda lowerCAmelCase : str(UpperCamelCase_ ).lower() , default="""ml.p3.2xlarge""" )
UpperCAmelCase = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
UpperCAmelCase = _ask_field(
"""How many machines do you want use? [1]: """ , UpperCamelCase_ , default=1 , )
UpperCAmelCase = _ask_options(
"""Do you wish to use FP16 or BF16 (mixed precision)?""" , ["""no""", """fp16""", """bf16""", """fp8"""] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"""Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )
return SageMakerConfig(
image_uri=UpperCamelCase_ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCamelCase_ , use_cpu=UpperCamelCase_ , dynamo_config=UpperCamelCase_ , eca_instance_type=UpperCamelCase_ , profile=UpperCamelCase_ , region=UpperCamelCase_ , iam_role_name=UpperCamelCase_ , mixed_precision=UpperCamelCase_ , num_machines=UpperCamelCase_ , sagemaker_inputs_file=UpperCamelCase_ , sagemaker_metrics_file=UpperCamelCase_ , )
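# NOTE: illustrative sketch only. The `_ask_field` helper used above is assumed
# to be a thin wrapper around input(): show the prompt, fall back to `default`
# on an empty answer, apply the optional converter, and re-prompt on conversion
# errors. The name `_ask_field_sketch` is hypothetical.
def _ask_field_sketch(input_text, convert_value=None, default=None, error_message=None):
    while True:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)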
| 673
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__magic_name__ = "<<<<<<< This should probably be modified because it mentions: "
__magic_name__ = "=======\n>>>>>>>\n"
__magic_name__ = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__magic_name__ = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
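# NOTE: illustrative sketch only, assuming the (pattern, replacement) list above
# is bound to the name TO_CONVERT, as the conversion loop later in this file
# expects. Order matters: the generic `tfds\.` rule must run after the more
# specific ones. `_demo_convert_line` is a hypothetical name.
def _demo_convert_line(line):
    for pattern, replacement in TO_CONVERT:
        line = re.sub(pattern, replacement, line)
    return line
# _demo_convert_line("tfds.features.Text()") -> "datasets.Value('string')"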
def _lowerCAmelCase ( args ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
@staticmethod
def snake_case_ ( lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to the HuggingFace Datasets folder.""")
train_parser.set_defaults(func=lowerCAmelCase__)
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = get_logger("""datasets-cli/converting""")
__SCREAMING_SNAKE_CASE = tfds_path
__SCREAMING_SNAKE_CASE = datasets_directory
def snake_case_ ( self):
if os.path.isdir(self._tfds_path):
__SCREAMING_SNAKE_CASE = os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
__SCREAMING_SNAKE_CASE = os.path.dirname(self._tfds_path)
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""")
__SCREAMING_SNAKE_CASE = os.path.abspath(self._datasets_directory)
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {}
if os.path.isdir(self._tfds_path):
__SCREAMING_SNAKE_CASE = os.listdir(lowerCAmelCase__)
else:
__SCREAMING_SNAKE_CASE = [os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}")
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
if not os.path.isfile(lowerCAmelCase__) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""")
continue
with open(lowerCAmelCase__ , encoding="""utf-8""") as f:
__SCREAMING_SNAKE_CASE = f.readlines()
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = []
for line in lines:
__SCREAMING_SNAKE_CASE = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
__SCREAMING_SNAKE_CASE = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
__SCREAMING_SNAKE_CASE = """"""
continue
elif "from absl import logging" in out_line:
__SCREAMING_SNAKE_CASE = """from datasets import logging\n"""
elif "getLogger" in out_line:
__SCREAMING_SNAKE_CASE = out_line.replace("""getLogger""" , """get_logger""")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = list(filter(lambda e: e in out_line , TO_HIGHLIGHT))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase__) + """\n""")
out_lines.append(lowerCAmelCase__)
out_lines.append(lowerCAmelCase__)
continue
else:
for pattern, replacement in TO_CONVERT:
__SCREAMING_SNAKE_CASE = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
__SCREAMING_SNAKE_CASE = re.match(R"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , lowerCAmelCase__)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(""","""))
__SCREAMING_SNAKE_CASE = """from . import """ + match.group(1)
# Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
__SCREAMING_SNAKE_CASE = True
out_lines.append(lowerCAmelCase__)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
__SCREAMING_SNAKE_CASE = f_name.replace(""".py""" , """""")
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , lowerCAmelCase__)
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__)
self._logger.info(f"Adding directory {output_dir}")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(lowerCAmelCase__)
if needs_manual_update:
with_manual_update.append(lowerCAmelCase__)
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""") as f:
f.writelines(lowerCAmelCase__)
self._logger.info(f"Converted in {output_file}")
for utils_file in utils_files:
try:
__SCREAMING_SNAKE_CASE = os.path.basename(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = imports_to_builder_map[f_name.replace(""".py""" , """""")]
self._logger.info(f"Moving {dest_folder} to {utils_file}")
shutil.copy(lowerCAmelCase__ , lowerCAmelCase__)
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
| 155
| 0
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
snake_case__ = SwinConfig()
snake_case__ = swin_name.split("_" )
snake_case__ = name_split[1]
snake_case__ = int(name_split[4] )
snake_case__ = int(name_split[3][-1] )
if model_size == "tiny":
snake_case__ = 96
snake_case__ = (2, 2, 6, 2)
snake_case__ = (3, 6, 12, 24)
elif model_size == "small":
snake_case__ = 96
snake_case__ = (2, 2, 18, 2)
snake_case__ = (3, 6, 12, 24)
elif model_size == "base":
snake_case__ = 128
snake_case__ = (2, 2, 18, 2)
snake_case__ = (4, 8, 16, 32)
else:
snake_case__ = 192
snake_case__ = (2, 2, 18, 2)
snake_case__ = (6, 12, 24, 48)
if "in22k" in swin_name:
snake_case__ = 21_841
else:
snake_case__ = 1_000
snake_case__ = "huggingface/label-files"
snake_case__ = "imagenet-1k-id2label.json"
snake_case__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
snake_case__ = {int(k): v for k, v in idalabel.items()}
snake_case__ = idalabel
snake_case__ = {v: k for k, v in idalabel.items()}
snake_case__ = img_size
snake_case__ = num_classes
snake_case__ = embed_dim
snake_case__ = depths
snake_case__ = num_heads
snake_case__ = window_size
return config
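# NOTE: illustrative check only; it reproduces just the string parsing above.
# (get_swin_config() itself additionally fetches the id2label file from the Hub.)
_demo_name = "swin_base_patch4_window7_224_in22k"
_demo_split = _demo_name.split("_")
assert _demo_split[1] == "base"       # model size -> embed_dim 128, heads (4, 8, 16, 32)
assert int(_demo_split[4]) == 224     # image size
assert int(_demo_split[3][-1]) == 7   # window size
assert "in22k" in _demo_name          # -> 21_841 ImageNet-22k classes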
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
if "patch_embed.proj" in name:
snake_case__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
snake_case__ = "encoder." + name
if "attn.proj" in name:
snake_case__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
snake_case__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
snake_case__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
snake_case__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
snake_case__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case__ = name.replace("mlp.fc2" , "output.dense" )
if name == "norm.weight":
snake_case__ = "layernorm.weight"
if name == "norm.bias":
snake_case__ = "layernorm.bias"
if "head" in name:
snake_case__ = name.replace("head" , "classifier" )
else:
snake_case__ = "swin." + name
return name
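# NOTE: illustrative examples only. The renaming above maps timm parameter
# names onto the Hugging Face Swin layout, e.g.:
#   "patch_embed.proj.weight"        -> "swin.embeddings.patch_embeddings.projection.weight"
#   "layers.0.blocks.0.norm1.weight" -> "swin.encoder.layers.0.blocks.0.layernorm_before.weight"
#   "norm.weight"                    -> "swin.layernorm.weight"
#   "head.weight"                    -> "classifier.weight"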
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
for key in orig_state_dict.copy().keys():
snake_case__ = orig_state_dict.pop(__lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
snake_case__ = key.split("." )
snake_case__ = int(key_split[1] )
snake_case__ = int(key_split[3] )
snake_case__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case__ = val[:dim, :]
snake_case__ = val[
dim : dim * 2, :
]
snake_case__ = val[-dim:, :]
else:
snake_case__ = val[
:dim
]
snake_case__ = val[
dim : dim * 2
]
snake_case__ = val[
-dim:
]
else:
snake_case__ = val
return orig_state_dict
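# NOTE: illustrative check only. The "qkv" branch above slices timm's fused
# attention projection of shape (3 * dim, dim) into separate query/key/value
# tensors of shape (dim, dim); the `_demo_*` names are hypothetical.
_demo_dim = 4
_demo_fused = torch.arange(3 * _demo_dim * _demo_dim, dtype=torch.float32).reshape(3 * _demo_dim, _demo_dim)
_demo_q = _demo_fused[:_demo_dim, :]
_demo_k = _demo_fused[_demo_dim : _demo_dim * 2, :]
_demo_v = _demo_fused[-_demo_dim:, :]
assert _demo_q.shape == _demo_k.shape == _demo_v.shape == (_demo_dim, _demo_dim)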
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
snake_case__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
snake_case__ = get_swin_config(__lowerCAmelCase )
snake_case__ = SwinForImageClassification(__lowerCAmelCase )
model.eval()
snake_case__ = convert_state_dict(timm_model.state_dict() , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
snake_case__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case__ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_" , "-" ) ) )
snake_case__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
snake_case__ = image_processor(images=__lowerCAmelCase , return_tensors="pt" )
snake_case__ = timm_model(inputs["pixel_values"] )
snake_case__ = model(**__lowerCAmelCase ).logits
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__magic_name__ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 530
|
from __future__ import annotations
from fractions import Fraction
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
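# NOTE: illustrative examples only. 49/98 is the classic digit-cancelling
# fraction: striking the shared digit 9 leaves 4/8, which equals the original
# value, so the check above holds for (49, 98); it fails for (12, 34), where
# the last digit of the numerator (2) is not the first digit of the denominator (3).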
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
snake_case__ = []
snake_case__ = 11
snake_case__ = int("1" + "0" * digit_len )
for num in range(__lowerCAmelCase , __lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
snake_case__ = 10
return solutions
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = 2 ):
snake_case__ = 1.0
for fraction in fraction_list(__lowerCAmelCase ):
snake_case__ = Fraction(__lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 530
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__A = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__A = """UperNetConfig"""
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = False , __UpperCAmelCase = 1 , ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Optional[int] = nn.Convad(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , kernel_size=__UpperCAmelCase , padding=__UpperCAmelCase , bias=__UpperCAmelCase , dilation=__UpperCAmelCase , )
lowerCAmelCase__ :Any = nn.BatchNormad(__UpperCAmelCase )
lowerCAmelCase__ :int = nn.ReLU()
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = self.conv(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = self.batch_norm(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = self.activation(__UpperCAmelCase )
return output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Optional[int] = [
nn.AdaptiveAvgPoolad(__UpperCAmelCase ),
UperNetConvModule(__UpperCAmelCase , __UpperCAmelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = input
for layer in self.layers:
lowerCAmelCase__ :Any = layer(__UpperCAmelCase )
return hidden_state
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Union[str, Any] = pool_scales
lowerCAmelCase__ :Union[str, Any] = align_corners
lowerCAmelCase__ :List[str] = in_channels
lowerCAmelCase__ :Any = channels
lowerCAmelCase__ :Optional[int] = []
for i, pool_scale in enumerate(__UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = UperNetPyramidPoolingBlock(pool_scale=__UpperCAmelCase , in_channels=__UpperCAmelCase , channels=__UpperCAmelCase )
self.blocks.append(__UpperCAmelCase )
self.add_module(str(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = []
for ppm in self.blocks:
lowerCAmelCase__ :Optional[Any] = ppm(__UpperCAmelCase )
lowerCAmelCase__ :Union[str, Any] = nn.functional.interpolate(
__UpperCAmelCase , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(__UpperCAmelCase )
return ppm_outs
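# NOTE: illustrative note only. For pool_scales such as (1, 2, 3, 6), each block
# pools the input down to a k x k map and projects it to `channels` with a 1x1
# conv; the interpolate above restores the input's spatial size, so every entry
# of ppm_outs can later be concatenated along the channel dimension.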
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Union[str, Any] = config
lowerCAmelCase__ :List[str] = config.pool_scales # e.g. (1, 2, 3, 6)
lowerCAmelCase__ :str = in_channels
lowerCAmelCase__ :Optional[int] = config.hidden_size
lowerCAmelCase__ :Optional[Any] = False
lowerCAmelCase__ :Tuple = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowerCAmelCase__ :Optional[int] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowerCAmelCase__ :List[str] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowerCAmelCase__ :str = nn.ModuleList()
lowerCAmelCase__ :Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowerCAmelCase__ :str = UperNetConvModule(__UpperCAmelCase , self.channels , kernel_size=1 )
lowerCAmelCase__ :Optional[int] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__UpperCAmelCase )
self.fpn_convs.append(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def snake_case ( self ):
'''simple docstring'''
self.apply(self._init_weights )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = inputs[-1]
lowerCAmelCase__ :Optional[int] = [x]
psp_outs.extend(self.psp_modules(__UpperCAmelCase ) )
lowerCAmelCase__ :Dict = torch.cat(__UpperCAmelCase , dim=1 )
lowerCAmelCase__ :Dict = self.bottleneck(__UpperCAmelCase )
return output
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__UpperCAmelCase ) )
# build top-down path
lowerCAmelCase__ :Any = len(__UpperCAmelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCAmelCase__ :Dict = laterals[i - 1].shape[2:]
lowerCAmelCase__ :int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__UpperCAmelCase , mode='bilinear' , align_corners=self.align_corners )
# build outputs
lowerCAmelCase__ :int = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowerCAmelCase__ :Optional[Any] = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
lowerCAmelCase__ :List[Any] = torch.cat(__UpperCAmelCase , dim=1 )
lowerCAmelCase__ :Union[str, Any] = self.fpn_bottleneck(__UpperCAmelCase )
lowerCAmelCase__ :Tuple = self.classifier(__UpperCAmelCase )
return output
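# NOTE: illustrative note only. The forward pass above follows the standard
# UPerNet/FPN recipe: 1x1 lateral convs align channel counts, the top-down loop
# adds upsampled coarser maps into finer ones, 3x3 fpn_convs smooth each level,
# and all levels are resized to the finest resolution and concatenated before
# the 1x1 classifier.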
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = 2 , __UpperCAmelCase = 3 , __UpperCAmelCase = 1 ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :int = config
lowerCAmelCase__ :Tuple = config.auxiliary_in_channels
lowerCAmelCase__ :List[str] = config.auxiliary_channels
lowerCAmelCase__ :Optional[Any] = config.auxiliary_num_convs
lowerCAmelCase__ :Dict = config.auxiliary_concat_input
lowerCAmelCase__ :Union[str, Any] = in_index
lowerCAmelCase__ :str = (kernel_size // 2) * dilation
lowerCAmelCase__ :Dict = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__UpperCAmelCase , padding=__UpperCAmelCase , dilation=__UpperCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__UpperCAmelCase , padding=__UpperCAmelCase , dilation=__UpperCAmelCase ) )
if self.num_convs == 0:
lowerCAmelCase__ :str = nn.Identity()
else:
lowerCAmelCase__ :Tuple = nn.Sequential(*__UpperCAmelCase )
if self.concat_input:
lowerCAmelCase__ :Any = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__UpperCAmelCase , padding=kernel_size // 2 )
lowerCAmelCase__ :Dict = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def snake_case ( self ):
'''simple docstring'''
self.apply(self._init_weights )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = encoder_hidden_states[self.in_index]
lowerCAmelCase__ :Any = self.convs(__UpperCAmelCase )
if self.concat_input:
lowerCAmelCase__ :str = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowerCAmelCase__ :Tuple = self.classifier(__UpperCAmelCase )
return output
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :Optional[Any] = UperNetConfig
__magic_name__ :List[str] = """pixel_values"""
__magic_name__ :Any = True
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def snake_case ( self ):
'''simple docstring'''
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :Optional[Any] = value
__A = R"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__A = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , a , )
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowerCAmelCase__ :Union[str, Any] = UperNetHead(__UpperCAmelCase , in_channels=self.backbone.channels )
lowerCAmelCase__ :Optional[int] = UperNetFCNHead(__UpperCAmelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=__UpperCAmelCase , config_class=_CONFIG_FOR_DOC )
def snake_case ( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ :str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ :str = output_attentions if output_attentions is not None else self.config.output_attentions
lowerCAmelCase__ :List[Any] = self.backbone.forward_with_filtered_kwargs(
__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , output_attentions=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = outputs.feature_maps
lowerCAmelCase__ :Optional[Any] = self.decode_head(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = nn.functional.interpolate(__UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__UpperCAmelCase )
lowerCAmelCase__ :int = None
if self.auxiliary_head is not None:
lowerCAmelCase__ :Union[str, Any] = self.auxiliary_head(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = nn.functional.interpolate(
__UpperCAmelCase , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=__UpperCAmelCase )
lowerCAmelCase__ :Tuple = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
lowerCAmelCase__ :List[str] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowerCAmelCase__ :Optional[int] = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = loss_fct(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowerCAmelCase__ :Any = (logits,) + outputs[1:]
else:
lowerCAmelCase__ :List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__UpperCAmelCase , logits=__UpperCAmelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 93
|
from math import isqrt, loga
def lowerCAmelCase__ ( a__ ) ->list[int]:
'''simple docstring'''
_UpperCamelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , a__ , a__ ):
_UpperCamelCase = False
return [i for i in range(2 , a__ ) if is_prime[i]]
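# NOTE: illustrative usage only. The sieve returns every prime strictly below
# its argument, e.g. calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19].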
def lowerCAmelCase__ ( a__ = 800_800 , a__ = 800_800 ) ->int:
'''simple docstring'''
_UpperCamelCase = degree * loga(a__ )
_UpperCamelCase = int(a__ )
_UpperCamelCase = calculate_prime_numbers(a__ )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = len(a__ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
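# NOTE: illustrative note only. The two-pointer loop relies on taking log2 of
# the hybrid-integer condition:
#   p**q * q**p <= base**degree  <=>  q * log2(p) + p * log2(q) <= degree * log2(base)
# which avoids ever forming the astronomically large integer p**q * q**p.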
if __name__ == "__main__":
print(F"{solution() = }")
| 547
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
lowercase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
lowercase = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
lowercase = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
lowercase = tempfile.mkdtemp()
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , _lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
# load decoder from hub
lowercase = """hf-internal-testing/ngram-beam-search-decoder"""
def _a ( self , **_lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = self.add_kwargs_tokens_map.copy()
kwargs.update(_lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def _a ( self , **_lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_tokenizer()
lowercase = self.get_feature_extractor()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = floats_list((3, 1000) )
lowercase = feature_extractor(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor(_lowerCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = """This is a test string"""
lowercase = processor(text=_lowerCAmelCase )
lowercase = tokenizer(_lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self , _lowerCAmelCase=(2, 10, 16) , _lowerCAmelCase=77 ) -> List[Any]:
'''simple docstring'''
np.random.seed(_lowerCAmelCase )
return np.random.rand(*_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
lowercase = processor.decode(_lowerCAmelCase )
lowercase = decoder.decode_beams(_lowerCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
lowercase = processor.batch_decode(_lowerCAmelCase )
else:
with get_context(_lowerCAmelCase ).Pool() as pool:
lowercase = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase )
lowercase = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as p:
lowercase = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase )
lowercase , lowercase , lowercase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_lowerCAmelCase , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score )
self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
lowercase = 15
lowercase = -20.0
lowercase = -4.0
lowercase = processor.batch_decode(
_lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowercase = decoded_processor_out.text
lowercase = list(_lowerCAmelCase )
with get_context("""fork""" ).Pool() as pool:
lowercase = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , )
lowercase = [d[0][0] for d in decoded_decoder_out]
lowercase = [d[0][2] for d in decoded_decoder_out]
lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1E-3 ) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
lowercase = self._get_dummy_logits()
lowercase = 2.0
lowercase = 5.0
lowercase = -20.0
lowercase = True
lowercase = processor.batch_decode(
_lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
lowercase = decoded_processor_out.text
lowercase = list(_lowerCAmelCase )
decoder.reset_params(
alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , )
with get_context("""fork""" ).Pool() as pool:
lowercase = decoder.decode_beams_batch(
_lowerCAmelCase , _lowerCAmelCase , )
lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowercase = os.listdir(_lowerCAmelCase )
lowercase = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = snapshot_download("""hf-internal-testing/processor_with_lm""" )
lowercase = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase )
lowercase = processor.decoder.model_container[processor.decoder._model_key]
lowercase = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
lowercase = os.listdir(_lowerCAmelCase )
lowercase = os.listdir(_lowerCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = floats_list((3, 1000) )
lowercase = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" )
lowercase = processor_auto(_lowerCAmelCase , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
lowercase = self._get_dummy_logits()
lowercase = processor_wavaveca.batch_decode(_lowerCAmelCase )
lowercase = processor_auto.batch_decode(_lowerCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.get_feature_extractor()
lowercase = self.get_tokenizer()
lowercase = self.get_decoder()
lowercase = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def _a ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = [d[key] for d in offsets]
return retrieved_list
def _a ( self ) -> str:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = self._get_dummy_logits()[0]
lowercase = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
lowercase = self._get_dummy_logits()
lowercase = processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _a ( self ) -> Optional[int]:
'''simple docstring'''
import torch
lowercase = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase )
lowercase = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6000 ) )
lowercase = iter(_lowerCAmelCase )
lowercase = next(_lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
lowercase = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
lowercase = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
lowercase = model(_lowerCAmelCase ).logits.cpu().numpy()
lowercase = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase )
lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
lowercase = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
lowercase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase )
self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text )
# output times
lowercase = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) )
lowercase = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) )
# fmt: off
lowercase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
lowercase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
| 716
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE ( ):
    lowercase = HfArgumentParser(TensorFlowBenchmarkArguments )
lowercase = parser.parse_args_into_dataclasses()[0]
lowercase = TensorFlowBenchmark(args=lowercase_ )
try:
lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
lowercase = """"""
lowercase = eval(str(lowercase_ ).split(""" """ )[-1] )
lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            lowercase = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 653
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class __a ( unittest.TestCase ):
def UpperCamelCase ( self : List[str])-> List[str]:
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case_ , )
assert hasattr(self , """env""")
def UpperCamelCase ( self : List[Any] , snake_case_ : Union[str, Any]=1)-> Tuple:
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=snake_case_ , instance_type=self.instance_type , debugger_hook_config=snake_case_ , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def UpperCamelCase ( self : int , snake_case_ : Dict)-> List[Any]:
TrainingJobAnalytics(snake_case_).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def UpperCamelCase ( self : Optional[int])-> List[Any]:
# create estimator
__lowerCAmelCase =self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowerCAmelCase =TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCAmelCase =list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
__lowerCAmelCase =list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCAmelCase =(
Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 99_99_99)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""") as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case_)
| 354
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = "gpt_neo"
SCREAMING_SNAKE_CASE = ["past_key_values"]
SCREAMING_SNAKE_CASE = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Optional[int] , snake_case_ : int=5_02_57 , snake_case_ : Tuple=20_48 , snake_case_ : Optional[int]=20_48 , snake_case_ : int=24 , snake_case_ : List[Any]=[[["global", "local"], 12]] , snake_case_ : Tuple=16 , snake_case_ : Optional[int]=None , snake_case_ : Dict=2_56 , snake_case_ : List[str]="gelu_new" , snake_case_ : List[Any]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : str=0.1 , snake_case_ : Union[str, Any]=1e-5 , snake_case_ : str=0.0_2 , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[int]=5_02_56 , snake_case_ : Tuple=5_02_56 , **snake_case_ : Optional[int] , )-> Tuple:
__lowerCAmelCase =vocab_size
__lowerCAmelCase =max_position_embeddings
__lowerCAmelCase =hidden_size
__lowerCAmelCase =num_layers
__lowerCAmelCase =num_heads
__lowerCAmelCase =intermediate_size
__lowerCAmelCase =window_size
__lowerCAmelCase =activation_function
__lowerCAmelCase =resid_dropout
__lowerCAmelCase =embed_dropout
__lowerCAmelCase =attention_dropout
__lowerCAmelCase =classifier_dropout
__lowerCAmelCase =layer_norm_epsilon
__lowerCAmelCase =initializer_range
__lowerCAmelCase =use_cache
__lowerCAmelCase =bos_token_id
__lowerCAmelCase =eos_token_id
__lowerCAmelCase =attention_types
__lowerCAmelCase =self.expand_attention_types_params(snake_case_)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
F"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""")
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_)
@staticmethod
def UpperCamelCase ( snake_case_ : Optional[int])-> Any:
__lowerCAmelCase =[]
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def __lowerCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> int:
import torch
__lowerCAmelCase =input.size()
__lowerCAmelCase =len(__lowerCamelCase )
__lowerCAmelCase =shape[dimension]
__lowerCAmelCase =torch.arange(0 , __lowerCamelCase , __lowerCamelCase )
__lowerCAmelCase =torch.div(sizedim - size , __lowerCamelCase , rounding_mode="""floor""" ) + 1
__lowerCAmelCase =torch.arange(__lowerCamelCase ) + low_indices[:min_length][:, None]
__lowerCAmelCase =[slice(__lowerCamelCase )] * rank
__lowerCAmelCase =indices
__lowerCAmelCase =input[s]
__lowerCAmelCase =list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__lowerCamelCase )
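# NOTE: illustrative note only. The helper above mimics torch.Tensor.unfold
# along an arbitrary dimension (a re-implementation used to enable ONNX export):
# a length-`sizedim` axis is sliced into windows of length `size` taken every
# `step` elements, e.g. a (1, 6) input with size=2, step=2 yields a (1, 3, 2)
# output of non-overlapping pairs.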
def __lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ) -> List[str]:
import torch
__lowerCAmelCase =torch.arange(1 , __lowerCamelCase )
__lowerCAmelCase =torch.remainder(__lowerCamelCase , __lowerCamelCase )
__lowerCAmelCase =remainders == 0
__lowerCAmelCase =candidates[divisor_indices]
__lowerCAmelCase =torch.max(__lowerCamelCase )
return largest_divisor, torch.div(__lowerCamelCase , __lowerCamelCase , rounding_mode="""floor""" )
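# NOTE: illustrative note only. This helper finds the largest divisor of the
# sequence length that is smaller than `window_size` (e.g. length 10 with
# window 4 gives divisor 2 and 10 // 2 = 5 blocks), so local attention can use
# a block length that evenly tiles the sequence.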
class __a ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : Union[str, Any])-> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""")
__lowerCAmelCase ={0: """batch""", 1: """past_sequence + sequence"""}
else:
__lowerCAmelCase ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCamelCase ( self : Optional[int])-> int:
return self._config.num_heads
def UpperCamelCase ( self : int , snake_case_ : PreTrainedTokenizer , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional[TensorType] = None , )-> Mapping[str, Any]:
__lowerCAmelCase =super(snake_case_ , self).generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_)
# We need to order the input in the way they appears in the forward()
__lowerCAmelCase =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
__lowerCAmelCase , __lowerCAmelCase =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__lowerCAmelCase =seqlen + 2
__lowerCAmelCase =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowerCAmelCase =[
(torch.zeros(snake_case_), torch.zeros(snake_case_)) for _ in range(self.num_layers)
]
__lowerCAmelCase =common_inputs["""attention_mask"""]
if self.use_past:
__lowerCAmelCase =ordered_inputs["""attention_mask"""].dtype
__lowerCAmelCase =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_)] , dim=1)
return ordered_inputs
@property
def UpperCamelCase ( self : Union[str, Any])-> int:
return 13
| 354
| 1
|
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Apply an X (NOT) gate to two qubits, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
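

# Expected behaviour sketch (assumes qiskit with the Aer simulator installed):
# both qubits are flipped to |1> before measurement, so this deterministic
# circuit should report every one of the 1000 shots in state "11".
def _check_single_qubit_measure() -> None:
    assert single_qubit_measure(2, 2) == {"11": 1000}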
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
    import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a list of random float values with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )
    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
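

# Usage sketch: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property
# above multiplies out to 5 * 2**6 = 320 input samples per output frame.
def _example_inputs_to_logits_ratio() -> None:
    config = UniSpeechConfig()
    assert config.inputs_to_logits_ratio == 320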
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
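

# Hypothetical interactive usage of the helpers above (the prompt text and option
# labels are illustrative, not taken from the original file):
#
#   compute_env = _ask_options(
#       "In which compute environment are you running?",
#       ["This machine", "AWS (Amazon SageMaker)"],
#       _convert_compute_environment,
#   )
#   debug = _ask_field("Enable distributed debug mode? [yes/NO]: ", _convert_yes_no_to_bool, default=False)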
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """A pangram contains every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
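

# Quick self-check for the three implementations above (plain asserts, no framework):
def _check_pangram_variants() -> None:
    for fn in (is_pangram, is_pangram_faster, is_pangram_fastest):
        assert fn()  # the default sentence is a pangram
        assert not fn("My name is Unknown")  # missing several letters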
def benchmark() -> None:
    """Benchmark the three pangram checks with timeit."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr: list[int]) -> None:
        """Build the tree from arr in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add value to arr[index] in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Return the sum of arr[0 .. right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Return the sum of arr[left .. right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Find the largest index with prefix(index) <= value, or -1 if none exists."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
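

# Usage sketch for the Fenwick tree above: point updates plus O(log n) prefix/range sums.
def _example_fenwick_tree() -> None:
    tree = FenwickTree([1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3    # sum over indices [0, 3)
    assert tree.query(1, 4) == 2 + 3 + 4  # sum over indices [1, 4)
    tree.add(2, 10)                       # arr[2] += 10
    assert tree.get(2) == 13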
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Sort worker for one list position: repeatedly exchange values with neighbours through pipes."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Gaussian Error Linear Unit, computed with the exact erf formulation."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """Clip the range of possible GeLU outputs between [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: splits the input in two halves across `axis` and gates one half with the other."""
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
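

# Usage sketch (assumes TensorFlow 2.x is installed): look up an activation by
# name and apply it to a small tensor.
def _example_get_tf_activation() -> None:
    act = get_tf_activation("gelu_fast")
    out = act(tf.constant([-1.0, 0.0, 1.0]))
    assert out.shape == (3,)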
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search; returns True if there is a residual path from s to t."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Edmonds-Karp max flow; returns the saturated edges of a minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
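

# Worked expectation: this is the classic CLRS flow network with max flow 23; the
# minimum cut separates {0, 1, 2, 4} from {3, 5}, i.e. the saturated edges
# (1, 3), (4, 3) and (4, 5). Note that mincut() mutates its graph argument, so a
# copy is passed here.
def _check_mincut() -> None:
    graph_copy = [row[:] for row in test_graph]
    assert mincut(graph_copy, source=0, sink=5) == [(1, 3), (4, 3), (4, 5)]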
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
# Hard-coded timestep schedules. The original variable names were lost in this
# dump, so the generic SCHEDULE_* names below are placeholders chosen here.
SCHEDULE_A = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
SCHEDULE_B = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
SCHEDULE_C = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
SCHEDULE_D = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
SCHEDULE_E = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
SCHEDULE_F = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
SCHEDULE_G = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
SCHEDULE_H = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to out_file, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Parse CLI args, generate outputs, and (optionally) score them against references."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate the ratio beta = v/c of a velocity to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz boost to a four-vector (symbolic by default)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
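

# Numeric sanity sketch: at v = 0.5c the Lorentz factor should equal
# 1 / sqrt(1 - 0.25) ~= 1.1547.
def _check_gamma_half_c() -> None:
    v = 0.5 * c
    assert abs(beta(v) - 0.5) < 1e-12
    assert abs(gamma(v) - 1 / sqrt(0.75)) < 1e-12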
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if item is present in the (pre-sorted) list a_list."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
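

# Quick self-check (binary search requires the input list to be sorted):
def _check_binary_search() -> None:
    data = [1, 3, 5, 7, 9]
    assert binary_search(data, 7) is True
    assert binary_search(data, 4) is False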
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """
    Cleans a table-of-content section by removing duplicate entries and sorting
    the rest alphabetically, keeping the "overview" page first.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def lowerCAmelCase_ ( UpperCamelCase__ : int=False ):
"""simple docstring"""
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
__lowercase = yaml.safe_load(f.read() )
# Get to the API doc
__lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowercase = content[api_idx]["""sections"""]
# Then to the model doc
__lowercase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
__lowercase = api_doc[scheduler_idx]["""sections"""]
__lowercase = clean_doc_toc(UpperCamelCase__ )
__lowercase = False
if new_scheduler_doc != scheduler_doc:
__lowercase = True
if overwrite:
__lowercase = new_scheduler_doc
if diff:
if overwrite:
__lowercase = api_doc
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def lowerCAmelCase_ ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
with open(UpperCamelCase__ , encoding="""utf-8""" ) as f:
__lowercase = yaml.safe_load(f.read() )
# Get to the API doc
__lowercase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__lowercase = content[api_idx]["""sections"""]
# Then to the model doc
__lowercase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
__lowercase = False
__lowercase = api_doc[pipeline_idx]["""sections"""]
__lowercase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
__lowercase = pipeline_doc["""section"""]
__lowercase = clean_doc_toc(UpperCamelCase__ )
if overwrite:
__lowercase = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCamelCase__ )
# sort overall pipeline doc
__lowercase = clean_doc_toc(UpperCamelCase__ )
if new_pipeline_docs != pipeline_docs:
__lowercase = True
if overwrite:
__lowercase = new_pipeline_docs
if diff:
if overwrite:
__lowercase = api_doc
with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
UpperCAmelCase__ =argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
UpperCAmelCase__ =parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
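# Toy illustration of the dedup-and-sort behaviour of the cleaner above (a
# sketch with illustrative names, not the obfuscated ones in this file):
# duplicate "local" keys collapse to one entry and the rest is sorted by title.
_toy_docs = [
    {"local": "beta", "title": "Beta"},
    {"local": "alpha", "title": "Alpha"},
    {"local": "beta", "title": "Beta"},
]
_seen = set()
_deduped = [d for d in _toy_docs if d["local"] not in _seen and not _seen.add(d["local"])]
_deduped.sort(key=lambda d: d["title"].lower())
assert [d["local"] for d in _deduped] == ["alpha", "beta"]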
| 442
|
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : list ):
"""simple docstring"""
_enforce_args(UpperCamelCase__ , UpperCamelCase__ )
if n == 0:
return 0
__lowercase = float("""-inf""" )
for i in range(1 , n + 1 ):
__lowercase = max(
UpperCamelCase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , UpperCamelCase__ ) )
    return max_revenue
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : list ):
"""simple docstring"""
_enforce_args(UpperCamelCase__ , UpperCamelCase__ )
__lowercase = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : list , UpperCamelCase__ : list ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__lowercase = float("""-inf""" )
for i in range(1 , n + 1 ):
__lowercase = max(
UpperCamelCase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , UpperCamelCase__ , UpperCamelCase__ ) , )
__lowercase = max_revenue
return max_rev[n]
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : list ):
"""simple docstring"""
_enforce_args(UpperCamelCase__ , UpperCamelCase__ )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
__lowercase = [float("""-inf""" ) for _ in range(n + 1 )]
__lowercase = 0
for i in range(1 , n + 1 ):
__lowercase = max_rev[i]
for j in range(1 , i + 1 ):
__lowercase = max(UpperCamelCase__ , prices[j - 1] + max_rev[i - j] )
__lowercase = max_revenue_i
return max_rev[n]
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : list ):
"""simple docstring"""
if n < 0:
__lowercase = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(UpperCamelCase__ )
if n > len(UpperCamelCase__ ):
__lowercase = (
"""Each integral piece of rod must have a corresponding price. """
f'''Got n = {n} but length of prices = {len(UpperCamelCase__ )}'''
)
raise ValueError(UpperCamelCase__ )
def lowerCAmelCase_ ( ):
"""simple docstring"""
__lowercase = [6, 10, 12, 15, 20, 23]
__lowercase = len(UpperCamelCase__ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__lowercase = 36
__lowercase = top_down_cut_rod(UpperCamelCase__ , UpperCamelCase__ )
__lowercase = bottom_up_cut_rod(UpperCamelCase__ , UpperCamelCase__ )
__lowercase = naive_cut_rod_recursive(UpperCamelCase__ , UpperCamelCase__ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
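# Worked sketch of the bottom-up recurrence used above, with illustrative names:
# max_rev[i] = max over j in 1..i of (prices[j - 1] + max_rev[i - j]).
_prices = [1, 5, 8, 9]  # price of a rod of length 1, 2, 3, 4
_max_rev = [0] * (len(_prices) + 1)
for _i in range(1, len(_prices) + 1):
    _max_rev[_i] = max(_prices[_j - 1] + _max_rev[_i - _j] for _j in range(1, _i + 1))
assert _max_rev[4] == 10  # best cut: two pieces of length 2, worth 5 + 5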
| 442
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : Optional[Any] = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """vivit"""
def __init__( self : Tuple , UpperCamelCase__ : Tuple=224 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Optional[Any]=[2, 16, 16] , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Tuple=768 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Optional[int]="gelu_fast" , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : List[Any]=1E-06 , UpperCamelCase__ : Union[str, Any]=True , **UpperCamelCase__ : Dict , ) -> int:
"""simple docstring"""
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = initializer_range
__magic_name__ = layer_norm_eps
__magic_name__ = image_size
__magic_name__ = num_frames
__magic_name__ = tubelet_size
__magic_name__ = num_channels
__magic_name__ = qkv_bias
super().__init__(**UpperCamelCase__ )
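# Note (a sketch assuming the standard ViViT tubelet embedding): with the defaults
# above (image_size=224, num_frames=32, tubelet_size=[2, 16, 16]), the input video
# is split into (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136 tubelets.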
| 529
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = prime_factors(A_ )
if is_square_free(A_ ):
return -1 if len(A_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
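# Self-contained sketch of the same Mobius logic with illustrative names,
# checked on a few small inputs (0 when n is not square-free, otherwise the
# sign depends on the parity of the number of distinct prime factors).
def _mobius_sketch(n: int) -> int:
    factors, d, m = set(), 2, n
    while d * d <= m:
        if m % d == 0:
            m //= d
            if m % d == 0:  # repeated prime factor -> not square-free
                return 0
            factors.add(d)
        else:
            d += 1
    if m > 1:
        factors.add(m)
    return -1 if len(factors) % 2 else 1

assert [_mobius_sketch(k) for k in (4, 6, 7)] == [0, 1, -1]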
| 529
| 1
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
A__ = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
A__ = {
'''ctrl''': 256,
}
A__ = {
'''Pregnancy''': 16_8629,
'''Christianity''': 7675,
'''Explain''': 10_6423,
'''Fitness''': 6_3440,
'''Saving''': 6_3163,
'''Ask''': 2_7171,
'''Ass''': 9_5985,
'''Joke''': 16_3509,
'''Questions''': 4_5622,
'''Thoughts''': 4_9605,
'''Retail''': 5_2342,
'''Feminism''': 16_4338,
'''Writing''': 1_1992,
'''Atheism''': 19_2263,
'''Netflix''': 4_8616,
'''Computing''': 3_9639,
'''Opinion''': 4_3213,
'''Alone''': 4_4967,
'''Funny''': 5_8917,
'''Gaming''': 4_0358,
'''Human''': 4088,
'''India''': 1331,
'''Joker''': 7_7138,
'''Diet''': 3_6206,
'''Legal''': 1_1859,
'''Norman''': 4939,
'''Tip''': 7_2689,
'''Weight''': 5_2343,
'''Movies''': 4_6273,
'''Running''': 2_3425,
'''Science''': 2090,
'''Horror''': 3_7793,
'''Confession''': 6_0572,
'''Finance''': 1_2250,
'''Politics''': 1_6360,
'''Scary''': 19_1985,
'''Support''': 1_2654,
'''Technologies''': 3_2516,
'''Teenage''': 6_6160,
'''Event''': 3_2769,
'''Learned''': 6_7460,
'''Notion''': 18_2770,
'''Wikipedia''': 3_7583,
'''Books''': 6665,
'''Extract''': 7_6050,
'''Confessions''': 10_2701,
'''Conspiracy''': 7_5932,
'''Links''': 6_3674,
'''Narcissus''': 15_0425,
'''Relationship''': 5_4766,
'''Relationships''': 13_4796,
'''Reviews''': 4_1671,
'''News''': 4256,
'''Translation''': 2_6820,
'''multilingual''': 12_8406,
}
def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : List[Any] = set()
snake_case__ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : int = char
snake_case__ : List[str] = set(__lowerCAmelCase )
return pairs
class a ( __lowerCamelCase ):
__lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[str] = CONTROL_CODES
def __init__( self :List[Any] ,__lowercase :Dict ,__lowercase :List[Any] ,__lowercase :Any="<unk>" ,**__lowercase :Optional[int] ):
super().__init__(unk_token=__lowercase ,**__lowercase )
with open(__lowercase ,encoding='''utf-8''' ) as vocab_handle:
snake_case__ : Optional[Any] = json.load(__lowercase )
snake_case__ : Any = {v: k for k, v in self.encoder.items()}
with open(__lowercase ,encoding='''utf-8''' ) as merges_handle:
snake_case__ : Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
snake_case__ : Any = [tuple(merge.split() ) for merge in merges]
snake_case__ : List[Any] = dict(zip(__lowercase ,range(len(__lowercase ) ) ) )
snake_case__ : Tuple = {}
@property
def __lowerCamelCase ( self :Optional[Any] ):
return len(self.encoder )
def __lowerCamelCase ( self :int ):
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :Optional[Any] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Optional[int] = tuple(__lowercase )
snake_case__ : Tuple = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
snake_case__ : Optional[Any] = get_pairs(__lowercase )
if not pairs:
return token
while True:
snake_case__ : Optional[int] = min(__lowercase ,key=lambda __lowercase : self.bpe_ranks.get(__lowercase ,float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__ , snake_case__ : int = bigram
snake_case__ : str = []
snake_case__ : Tuple = 0
while i < len(__lowercase ):
try:
snake_case__ : Dict = word.index(__lowercase ,__lowercase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : Optional[int] = j
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : List[str] = tuple(__lowercase )
snake_case__ : Any = new_word
if len(__lowercase ) == 1:
break
else:
snake_case__ : int = get_pairs(__lowercase )
snake_case__ : Optional[int] = '''@@ '''.join(__lowercase )
snake_case__ : Tuple = word[:-4]
snake_case__ : Union[str, Any] = word
return word
def __lowerCamelCase ( self :Optional[int] ,__lowercase :Dict ):
snake_case__ : Union[str, Any] = []
snake_case__ : List[str] = re.findall(r'''\S+\n?''' ,__lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(''' ''' ) ) )
return split_tokens
def __lowerCamelCase ( self :Tuple ,__lowercase :int ):
return self.encoder.get(__lowercase ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self :str ,__lowercase :Optional[int] ):
return self.decoder.get(__lowercase ,self.unk_token )
def __lowerCamelCase ( self :str ,__lowercase :Any ):
snake_case__ : Any = ''' '''.join(__lowercase ).replace('''@@ ''' ,'''''' ).strip()
return out_string
def __lowerCamelCase ( self :Dict ,__lowercase :str ,__lowercase :Optional[str] = None ):
if not os.path.isdir(__lowercase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : Tuple = os.path.join(
__lowercase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case__ : Tuple = os.path.join(
__lowercase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__lowercase ,'''w''' ,encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__lowercase ,ensure_ascii=__lowercase ) + '''\n''' )
snake_case__ : Tuple = 0
with open(__lowercase ,'''w''' ,encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
snake_case__ : Optional[int] = token_index
writer.write(''' '''.join(__lowercase ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
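# Sketch of the adjacent-pair extraction used by the BPE merge loop above,
# with illustrative names: for the symbol tuple ('l', 'o', 'w') the pair set
# is {('l', 'o'), ('o', 'w')}.
_word = ("l", "o", "w")
_pairs = {(_word[k], _word[k + 1]) for k in range(len(_word) - 1)}
assert _pairs == {("l", "o"), ("o", "w")}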
| 219
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class a ( __lowerCamelCase ):
__lowerCAmelCase : Optional[int] = """markuplm"""
def __init__( self :int ,__lowercase :str=3_0_5_2_2 ,__lowercase :str=7_6_8 ,__lowercase :str=1_2 ,__lowercase :Dict=1_2 ,__lowercase :Optional[Any]=3_0_7_2 ,__lowercase :Any="gelu" ,__lowercase :Optional[int]=0.1 ,__lowercase :Dict=0.1 ,__lowercase :Any=5_1_2 ,__lowercase :List[Any]=2 ,__lowercase :Tuple=0.02 ,__lowercase :List[Any]=1e-1_2 ,__lowercase :List[Any]=0 ,__lowercase :Optional[int]=0 ,__lowercase :str=2 ,__lowercase :Optional[Any]=2_5_6 ,__lowercase :List[str]=1_0_2_4 ,__lowercase :List[str]=2_1_6 ,__lowercase :Union[str, Any]=1_0_0_1 ,__lowercase :int=3_2 ,__lowercase :Union[str, Any]=5_0 ,__lowercase :Optional[Any]="absolute" ,__lowercase :int=True ,__lowercase :Optional[Any]=None ,**__lowercase :Union[str, Any] ,):
super().__init__(
pad_token_id=__lowercase ,bos_token_id=__lowercase ,eos_token_id=__lowercase ,**__lowercase ,)
snake_case__ : Optional[int] = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : Optional[int] = hidden_act
snake_case__ : Dict = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : Optional[int] = attention_probs_dropout_prob
snake_case__ : Union[str, Any] = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : str = initializer_range
snake_case__ : str = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Optional[Any] = use_cache
snake_case__ : Optional[Any] = classifier_dropout
# additional properties
snake_case__ : Any = max_depth
snake_case__ : Optional[Any] = max_xpath_tag_unit_embeddings
snake_case__ : Dict = max_xpath_subs_unit_embeddings
snake_case__ : str = tag_pad_id
snake_case__ : Union[str, Any] = subs_pad_id
snake_case__ : List[str] = xpath_unit_hidden_size
| 219
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int =ShapEPipeline
__UpperCAmelCase : Union[str, Any] =["""prompt"""]
__UpperCAmelCase : int =["""prompt"""]
__UpperCAmelCase : List[Any] =[
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__UpperCAmelCase : Dict =False
@property
def snake_case ( self ):
return 32
@property
def snake_case ( self ):
return 32
@property
def snake_case ( self ):
return self.time_input_dim * 4
@property
def snake_case ( self ):
return 8
@property
def snake_case ( self ):
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__a )
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
__lowerCAmelCase = PriorTransformer(**__a )
return model
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase = ShapERenderer(**__a )
return model
def snake_case ( self ):
__lowerCAmelCase = self.dummy_prior
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = self.dummy_tokenizer
__lowerCAmelCase = self.dummy_renderer
__lowerCAmelCase = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , )
__lowerCAmelCase = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def snake_case ( self , __a , __a=0 ):
if str(__a ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(__a )
else:
__lowerCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__lowerCAmelCase = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def snake_case ( self ):
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__a )
__lowerCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__a ) )
__lowerCAmelCase = output.images[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def snake_case ( self ):
__lowerCAmelCase = torch_device == "cpu"
__lowerCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__a , relax_max_difference=__a , )
def snake_case ( self ):
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__a )
__lowerCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = 1
__lowerCAmelCase = 2
__lowerCAmelCase = self.get_dummy_inputs(__a )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase = batch_size * [inputs[key]]
__lowerCAmelCase = pipe(**__a , num_images_per_prompt=__a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
__lowerCAmelCase = ShapEPipeline.from_pretrained("openai/shap-e" )
__lowerCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = torch.Generator(device=__a ).manual_seed(0 )
__lowerCAmelCase = pipe(
"a shark" , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__a , __a )
| 636
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =(KDPMaDiscreteScheduler,)
__UpperCAmelCase : Optional[Any] =1_0
def snake_case ( self , **__a ):
__lowerCAmelCase = {
"num_train_timesteps": 11_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**__a )
return config
def snake_case ( self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a )
def snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def snake_case ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def snake_case ( self ):
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase = scheduler.scale_model_input(__a , __a )
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a )
__lowerCAmelCase = output.prev_sample
__lowerCAmelCase = torch.sum(torch.abs(__a ) )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def snake_case ( self ):
if torch_device == "mps":
return
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowerCAmelCase = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowerCAmelCase = scheduler.scale_model_input(__a , __a )
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a )
__lowerCAmelCase = output.prev_sample
__lowerCAmelCase = torch.sum(torch.abs(__a ) )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def snake_case ( self ):
if torch_device == "mps":
return
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowerCAmelCase = scheduler.scale_model_input(__a , __a )
__lowerCAmelCase = model(__a , __a )
__lowerCAmelCase = scheduler.step(__a , __a , __a )
__lowerCAmelCase = output.prev_sample
__lowerCAmelCase = torch.sum(torch.abs(__a ) )
__lowerCAmelCase = torch.mean(torch.abs(__a ) )
if str(__a ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
| 636
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
_A = 'swinv2'
_A = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self :Union[str, Any] , a :Dict=2_2_4 , a :Dict=4 , a :List[Any]=3 , a :Optional[Any]=9_6 , a :int=[2, 2, 6, 2] , a :int=[3, 6, 1_2, 2_4] , a :Tuple=7 , a :List[Any]=4.0 , a :Union[str, Any]=True , a :Optional[Any]=0.0 , a :int=0.0 , a :str=0.1 , a :str="gelu" , a :List[str]=False , a :Dict=0.02 , a :Dict=1E-5 , a :Optional[int]=3_2 , **a :Optional[Any] , ) -> Optional[Any]:
super().__init__(**a )
__UpperCamelCase : List[str] = image_size
__UpperCamelCase : Dict = patch_size
__UpperCamelCase : List[str] = num_channels
__UpperCamelCase : Any = embed_dim
__UpperCamelCase : Dict = depths
__UpperCamelCase : Dict = len(a )
__UpperCamelCase : Dict = num_heads
__UpperCamelCase : Tuple = window_size
__UpperCamelCase : int = mlp_ratio
__UpperCamelCase : Optional[Any] = qkv_bias
__UpperCamelCase : Optional[Any] = hidden_dropout_prob
__UpperCamelCase : str = attention_probs_dropout_prob
__UpperCamelCase : List[Any] = drop_path_rate
__UpperCamelCase : Optional[int] = hidden_act
__UpperCamelCase : List[Any] = use_absolute_embeddings
__UpperCamelCase : List[str] = layer_norm_eps
__UpperCamelCase : Tuple = initializer_range
__UpperCamelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCamelCase : Dict = int(embed_dim * 2 ** (len(a ) - 1) )
__UpperCamelCase : Tuple = (0, 0, 0, 0)
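# Note (sketch): with the defaults above, the hidden_size exposed for
# VisionEncoderDecoderModel is embed_dim * 2 ** (len(depths) - 1)
# = 96 * 2 ** 3 = 768.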
| 94
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = BertConfig.from_json_file(_lowerCamelCase)
print(F'Building PyTorch model from configuration: {config}')
__UpperCamelCase : List[Any] = BertForPreTraining(_lowerCamelCase)
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , _lowerCamelCase)
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
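# Example invocation (a sketch; the script and file names are illustrative):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin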
| 94
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ : str = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
SCREAMING_SNAKE_CASE__ : List[str] = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
SCREAMING_SNAKE_CASE__ : Dict = "▁"
class lowerCAmelCase__ ( __lowercase ):
a__ : Any = VOCAB_FILES_NAMES
a__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : int="[CLS]" , SCREAMING_SNAKE_CASE__ : Optional[Any]="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE__ : List[str]="[SEP]" , SCREAMING_SNAKE_CASE__ : Any="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Dict="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
__lowerCamelCase = (
AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ , normalized=SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else mask_token
)
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
__lowerCamelCase = do_lower_case
__lowerCamelCase = remove_space
__lowerCamelCase = keep_accents
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __A ( self : List[Any] ) -> Dict:
return len(self.sp_model )
def __A ( self : List[str] ) -> Tuple:
__lowerCamelCase = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Dict:
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self : str , SCREAMING_SNAKE_CASE__ : int ) -> Any:
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
if self.remove_space:
__lowerCamelCase = ''' '''.join(inputs.strip().split() )
else:
__lowerCamelCase = inputs
__lowerCamelCase = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__lowerCamelCase = unicodedata.normalize('''NFKD''' , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = ''''''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__ )] )
if self.do_lower_case:
__lowerCamelCase = outputs.lower()
return outputs
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
__lowerCamelCase = self.preprocess_text(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = []
for piece in pieces:
if len(SCREAMING_SNAKE_CASE__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__lowerCamelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowerCamelCase = cur_pieces[1:]
else:
__lowerCamelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(SCREAMING_SNAKE_CASE__ )
else:
new_pieces.append(SCREAMING_SNAKE_CASE__ )
return new_pieces
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> str:
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]:
__lowerCamelCase = []
__lowerCamelCase = ''''''
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowerCamelCase = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
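# Sketch of the preprocessing performed above, with illustrative names:
# collapse whitespace, normalize LaTeX-style quotes, strip combining accents,
# then lowercase.
import unicodedata as _ud

_raw = "  Héllo ``world''  "
_out = " ".join(_raw.strip().split()).replace("``", '"').replace("''", '"')
_out = "".join(c for c in _ud.normalize("NFKD", _out) if not _ud.combining(c)).lower()
assert _out == 'hello "world"'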
| 298
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : Tuple ) -> Optional[int]:
__lowerCamelCase = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
__lowerCamelCase = {
'''input_ids''': tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
__lowerCamelCase = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice.
__lowerCamelCase = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 298
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_A : List[Any] ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Any =['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_A : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4
|
'''simple docstring'''
_A : Optional[Any] ='''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_lowercase, _lowercase ):
_lowercase : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
_lowercase : int = ''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
_lowercase : Dict = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
_lowercase : Optional[Any] = B'=' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
_lowercase : Optional[int] = B''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(_lowercase ), 6 ) ).encode()
+ padding
)
def __UpperCamelCase ( _lowercase ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_lowercase, _lowercase ) and not isinstance(_lowercase, _lowercase ):
_lowercase : int = (
'argument should be a bytes-like object or ASCII string, '
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase, _lowercase ):
try:
_lowercase : Optional[int] = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
_lowercase : Optional[int] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_lowercase : str = encoded_data[:-padding]
_lowercase : Tuple = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_lowercase : Union[str, Any] = ''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
_lowercase : List[str] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(_lowercase ), 8 )
]
return bytes(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
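# Quick cross-check of the scheme above against the standard library (a sketch):
# b"Hello World!" is 12 bytes = 96 bits, a multiple of 6, so no padding is needed.
import base64 as _b64

assert _b64.b64encode(b"Hello World!") == b"SGVsbG8gV29ybGQh"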
| 4
| 1
|
a = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
a = {value: key for key, value in encode_dict.items()}
def _SCREAMING_SNAKE_CASE ( snake_case ) -> str:
_UpperCAmelCase = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def _SCREAMING_SNAKE_CASE ( snake_case ) -> str:
if set(snake_case ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_UpperCAmelCase = """"""
for word in coded.split():
while len(snake_case ) != 0:
decoded += decode_dict[word[:5]]
_UpperCAmelCase = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
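# Worked example using the table above (a sketch; assumes the encode table is
# bound as `encode_dict`, the name the decode-table definition above relies on):
# "hello" -> AABBB AABAA ABABA ABABA ABBAB.
assert (
    encode_dict["h"] + encode_dict["e"] + 2 * encode_dict["l"] + encode_dict["o"]
    == "AABBBAABAAABABAABABAABBAB"
)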
| 518
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a = "facebook/wmt19-en-de"
a = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
a = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
a = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
a = tokenizer(["Making tiny model"], return_tensors="pt")
a = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
a = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-de
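# Once uploaded, the tiny checkpoint can be pulled like any other model (a sketch):
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#   tiny_model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")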
| 518
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[Any] = logging.get_logger(__name__)
A_ : List[Any] = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: List[str] = '''biogpt'''
def __init__( self , A__=4_2384 , A__=1024 , A__=24 , A__=16 , A__=4096 , A__="gelu" , A__=0.1 , A__=0.1 , A__=1024 , A__=0.0_2 , A__=1e-12 , A__=True , A__=True , A__=0.0 , A__=0.0 , A__=1 , A__=0 , A__=2 , **A__ , ):
A__ : Optional[int] = vocab_size
A__ : List[str] = max_position_embeddings
A__ : Optional[int] = hidden_size
A__ : Tuple = num_hidden_layers
A__ : Optional[Any] = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Dict = hidden_act
A__ : List[Any] = hidden_dropout_prob
A__ : Union[str, Any] = attention_probs_dropout_prob
A__ : Union[str, Any] = initializer_range
A__ : Optional[int] = layer_norm_eps
A__ : Tuple = scale_embedding
A__ : Tuple = use_cache
A__ : Dict = layerdrop
A__ : Optional[Any] = activation_dropout
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
| 64
|
class _a :
'''simple docstring'''
def __init__( self ):
A__ : str = """"""
A__ : Any = """"""
A__ : List[Any] = []
def __A ( self , A__ , A__ ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Union[str, Any] = self.__min_dist_top_down_dp(A__ , n - 1 )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , A__ )
A__ : Union[str, Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : List[Any] = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
def __A ( self , A__ , A__ ):
A__ : Tuple = worda
A__ : Dict = worda
A__ : Optional[Any] = [[-1 for _ in range(len(A__ ) )] for _ in range(len(A__ ) )]
return self.__min_dist_top_down_dp(len(A__ ) - 1 , len(A__ ) - 1 )
def __A ( self , A__ , A__ ):
A__ : Optional[Any] = worda
A__ : Dict = worda
A__ : Union[str, Any] = len(A__ )
A__ : List[str] = len(A__ )
A__ : int = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Tuple = j
elif j == 0: # second string is empty
A__ : Dict = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : str = self.dp[i - 1][j - 1]
else:
A__ : Union[str, Any] = self.dp[i][j - 1]
A__ : str = self.dp[i - 1][j]
A__ : Union[str, Any] = self.dp[i - 1][j - 1]
A__ : Tuple = 1 + min(A__ , A__ , A__ )
return self.dp[m][n]
if __name__ == "__main__":
A_ : Union[str, Any] = EditDistance()
print('****************** Testing Edit Distance DP Algorithm ******************')
print()
A_ : int = input('Enter the first string: ').strip()
A_ : List[str] = input('Enter the second string: ').strip()
print()
print(f'''The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}''')
print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}''')
print()
print('*************** End of Testing Edit Distance DP Algorithm ***************')
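# Compact rolling-row variant of the same recurrence (a sketch with
# illustrative names), sanity-checked on the classic "kitten" -> "sitting"
# pair, which needs 3 edits.
def _levenshtein_sketch(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

assert _levenshtein_sketch("kitten", "sitting") == 3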
| 64
| 1
|
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = F"Input value of [number={number}] must be an integer"
raise TypeError(_SCREAMING_SNAKE_CASE )
if number < 1:
_A = F"Input value of [number={number}] must be > 0"
raise ValueError(_SCREAMING_SNAKE_CASE )
_A = 1
for i in range(1 , _SCREAMING_SNAKE_CASE ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
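# The loop above walks the Catalan recurrence C(k) = C(k-1) * (4k - 2) // (k + 1),
# yielding 1, 1, 2, 5, 14, ...; a standalone sketch of the number = 5 case:
_c = 1
for _k in range(1, 5):
    _c = _c * (4 * _k - 2) // (_k + 1)
assert _c == 14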
| 27
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : Optional[Any] ={
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict =['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[int] =[
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
A_ : List[str] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 274
| 0
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
_lowerCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : List[Any] ):
__magic_name__ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
__magic_name__ = self.transformer_dir
shutil.copy(
os.path.join(lowercase__ , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
def snake_case__ ( self : Optional[int] ):
__magic_name__ = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def snake_case__ ( self : Tuple , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Optional[Any] , a__ : Dict=None ):
__magic_name__ = comment + F'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__magic_name__ = comment + F'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__magic_name__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
__magic_name__ = black.format_str(lowercase__ , mode=lowercase__ )
__magic_name__ = os.path.join(self.transformer_dir , '''new_code.py''' )
with open(lowercase__ , '''w''' , newline='''\n''' ) as f:
f.write(lowercase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowercase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowercase__ )
with open(lowercase__ , '''r''' ) as f:
self.assertTrue(f.read() , lowercase__ )
def snake_case__ ( self : str ):
__magic_name__ = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
self.assertEqual(lowercase__ , lowercase__ )
def snake_case__ ( self : int ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , lowercase__ , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , lowercase__ ) , )
# Copy consistency with a really long name
__magic_name__ = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , F'''{long_class_name}LMPredictionHead''' , re.sub('''Bert''' , lowercase__ , lowercase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , lowercase__ , overwrite_result=re.sub('''Bert''' , '''TestModel''' , lowercase__ ) , )
def snake_case__ ( self : List[Any] ):
__magic_name__ = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
__magic_name__ = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
__magic_name__ = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__magic_name__ = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )
        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
        link_unchanged_md_list = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        converted_md_list_sample = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
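# --- Illustrative sketch (not part of the original test suite) ---
# The "# Copied from" machinery exercised above asserts that a code block stays
# identical to its source, optionally after a `with Old->New` rename. A minimal
# re-implementation of the idea, assuming a hypothetical `get_source(qualname)`
# helper that returns the source of the referenced object:
def is_copy_consistent_sketch(comment: str, observed_code: str, get_source) -> bool:
    """Return True if `observed_code` matches the source named in `comment`."""
    match = re.match(r"# Copied from (\S+)(?: with (\w+)->(\w+))?", comment)
    if match is None:
        return False  # not a "Copied from" comment at all
    qualname, old, new = match.groups()
    expected = get_source(qualname)  # hypothetical helper, not part of check_copies
    if old is not None:
        expected = expected.replace(old, new)  # apply the declared rename
    return expected == observed_code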
| 704
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True,
        use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
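# Illustrative usage sketch (not part of the transformers source): the config can
# be built with defaults or with overrides, like any PretrainedConfig subclass.
if __name__ == "__main__":
    default_config = BioGptConfig()  # defaults match microsoft/biogpt
    small_config = BioGptConfig(num_hidden_layers=12, hidden_size=512)  # hypothetical smaller variant
    print(default_config.hidden_size, small_config.num_hidden_layers)  # -> 1024 12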
| 245
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
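    # Illustrative sketch (assumes the usual transformers tokenizer API): the same
    # feature dict can be produced from raw text instead of hand-written ids, e.g.
    #   tokenizer = AutoTokenizer.from_pretrained("jplu/tf-xlm-roberta-base")
    #   features = tokenizer("My dog is cute", return_tensors="tf")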
| 459
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
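# Note: the routine above swaps two *independently* random indices on each pass,
# which is not the textbook Fisher-Yates algorithm and does not yield a uniform
# permutation. A minimal sketch of the unbiased variant (what random.shuffle does):
def fisher_yates_shuffle_unbiased(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # draw only from the not-yet-fixed prefix
        data[i], data[j] = data[j], data[i]
    return data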
| 459
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400,
        do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
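    # Illustrative usage sketch (mirrors what the tests above exercise; arguments
    # are illustrative, not an authoritative API reference):
    #   processor = LevitImageProcessor(**self.image_processor_dict)
    #   pixel_values = processor(Image.new("RGB", (64, 48)), return_tensors="pt").pixel_values
    #   pixel_values.shape -> (1, num_channels, crop_size["height"], crop_size["width"])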
| 702
|
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
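# For comparison, a minimal sketch of the same most-recently-used-first policy
# built on collections.OrderedDict, which gives O(1) reordering instead of the
# O(n) deque.remove above (sketch only, not part of the original module):
from collections import OrderedDict


def make_lru_sketch(capacity: int):
    store: OrderedDict = OrderedDict()

    def refer(key) -> None:
        if key in store:
            store.move_to_end(key, last=False)  # bump to most-recently-used slot
        else:
            if len(store) == capacity:
                store.popitem(last=True)  # evict the least recently used key
            store[key] = True

    return refer, store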
| 271
| 0
|
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 65_536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48_000,
        "sample_size": 131_072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16_000,
        "sample_size": 65_536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
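# Sanity check of the schedule endpoints (follows directly from the two functions
# above): at t=0, sigma = sin(0)^2 = 0 and alpha = 1, so atan2(0, 1) * 2 / pi = 0;
# at t=1, sigma = 1 and alpha = 0, so atan2(1, 0) * 2 / pi = 1. In other words,
# get_crash_schedule(torch.tensor([0.0, 1.0])) returns [0.0, 1.0].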
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UP_NUM_TO_LAYER = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
MID_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
DEPTH_0_TO_LAYER = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
RES_CONV_MAP = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
ATTN_MAP = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])
    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
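# Quick illustration of the renaming scheme (keys are hypothetical examples):
#   rename("timestep_embed.weight") -> "time_proj.weight"   (direct mapping)
# Keys under "net...." descend one UNet depth level per "main.7." prefix and are
# routed to down_blocks / mid_block / up_blocks by their layer number.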
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()

        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
main(args)
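# Example invocation (assuming this script is saved as convert_dance_diffusion_to_diffusers.py;
# the checkpoint is downloaded automatically when --model_path is one of the MODELS_MAP names):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers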
| 174
|
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT, )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
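# Illustrative end-to-end sketch of the datasets API exercised above (outside the
# mocked test harness; names and data are illustrative):
#   ds = Dataset.from_dict({"emb": embeddings})      # float32 vectors, one per row
#   ds.add_faiss_index(column="emb")
#   scores, retrieved = ds.get_nearest_examples("emb", query_vector, k=5)
#   ds.save_faiss_index("emb", "my_index.faiss")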
| 174
| 1
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
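# Illustrative usage (outside the doctest block above; vertex/edge values are
# made up): build a small graph with `connect`, then run both MST routines.
#   vertices = [Vertex(i) for i in range(4)]
#   connect(vertices, 1, 2, 5)
#   connect(vertices, 2, 3, 1)
#   connect(vertices, 1, 4, 2)
#   prim(vertices, vertices[0]) == list(prim_heap(vertices, vertices[0]))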
| 711
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
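# Example launch (assuming this script is saved as run_tabfact_with_tapex.py;
# model name and paths are illustrative):
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact --overwrite_output_dir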
| 574
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
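# How the lazy pattern above behaves (sketch): importing this package only builds
# the _LazyModule shim; the heavy torch-backed module is imported on first
# attribute access, e.g.
#   from transformers.models.informer import InformerConfig  # cheap, config only
#   from transformers.models.informer import InformerModel   # triggers the modeling import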
| 148
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
def UpperCamelCase ( self : int ) -> int:
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a_ )
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a_ )
def UpperCamelCase ( self : Tuple ) -> List[str]:
snake_case: List[Any] =self.scheduler_classes[0]
snake_case: List[Any] =self.get_scheduler_config()
snake_case: Any =scheduler_class(**a_ )
snake_case: Dict =1
scheduler.set_timesteps(a_ )
snake_case: List[Any] =scheduler.timesteps
snake_case: Optional[Any] =torch.manual_seed(0 )
snake_case: Optional[Any] =self.dummy_model()
snake_case: List[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a_ ):
# 1. scale model input
snake_case: Any =scheduler.scale_model_input(a_ , a_ )
# 2. predict noise residual
snake_case: List[str] =model(a_ , a_ )
# 3. predict previous sample x_t-1
snake_case: Dict =scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
snake_case: List[Any] =pred_prev_sample
snake_case: Optional[Any] =torch.sum(torch.abs(a_ ) )
snake_case: Optional[Any] =torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_1_0 ) < 1E-3
def UpperCamelCase ( self : Dict ) -> Union[str, Any]:
snake_case: Dict =self.scheduler_classes[0]
snake_case: Tuple =self.get_scheduler_config()
snake_case: str =scheduler_class(**a_ )
snake_case: List[Any] =[1_0_6, 0]
scheduler.set_timesteps(timesteps=a_ )
snake_case: Optional[Any] =scheduler.timesteps
snake_case: Dict =torch.manual_seed(0 )
snake_case: Optional[int] =self.dummy_model()
snake_case: Any =self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
snake_case: List[Any] =scheduler.scale_model_input(a_ , a_ )
# 2. predict noise residual
snake_case: Any =model(a_ , a_ )
# 3. predict previous sample x_t-1
snake_case: List[str] =scheduler.step(a_ , a_ , a_ , generator=a_ ).prev_sample
snake_case: Optional[Any] =pred_prev_sample
snake_case: Union[str, Any] =torch.sum(torch.abs(a_ ) )
snake_case: Tuple =torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1E-2
assert abs(result_mean.item() - 0.4_5_2_7 ) < 1E-3
def UpperCamelCase ( self : int ) -> Tuple:
snake_case: List[Any] =self.scheduler_classes[0]
snake_case: Union[str, Any] =self.get_scheduler_config()
snake_case: str =scheduler_class(**a_ )
snake_case: str =[3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(a_ , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a_ )
def UpperCamelCase ( self : Dict ) -> Optional[int]:
snake_case: Optional[Any] =self.scheduler_classes[0]
snake_case: Dict =self.get_scheduler_config()
snake_case: str =scheduler_class(**a_ )
snake_case: Any =[3_9, 3_0, 1_2, 1, 0]
snake_case: List[Any] =len(a_ )
with self.assertRaises(a_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a_ , timesteps=a_ )
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
snake_case: Any =self.scheduler_classes[0]
snake_case: int =self.get_scheduler_config()
snake_case: Optional[Any] =scheduler_class(**a_ )
snake_case: List[Any] =[scheduler.config.num_train_timesteps]
with self.assertRaises(
a_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=a_ )
| 350
| 0
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
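# Scrapes the IMDb Top 250 chart and dumps the titles and ratings to a CSV file.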
from bs4 import BeautifulSoup
def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])
if __name__ == "__main__":
write_movies()
| 518
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a
    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples without an answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60 % of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]
    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 518
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        # rebuild the pre-tokenizer if the stored add_prefix_space flag disagrees with the requested one
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 92
|
from __future__ import annotations
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if len(_SCREAMING_SNAKE_CASE ) < k or k < 0:
raise ValueError('Invalid Input' )
lowercase__ = lowercase__ = sum(array[:k] )
for i in range(len(_SCREAMING_SNAKE_CASE ) - k ):
lowercase__ = current_sum - array[i] + array[i + k]
lowercase__ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase_ = [randint(-1_000, 1_000) for i in range(100)]
lowercase_ = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 235
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
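# TFBertTokenizer runs tokenization inside the TF graph itself, which is what lets it be
# saved and reloaded together with the model in the tests below.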
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)
                # the in-graph TF tokenizer must reproduce the Python tokenizer's outputs exactly
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 109
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109
| 1
|
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/mask2former-swin-small-coco-instance''': (
'''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 1_2544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )
    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 108
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = "Hello world! cécé herlolip"
BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
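# BertAbsConfig above bundles the hyper-parameters expected by the authors' AbsSummarizer implementation.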
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()
    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())
    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 250
| 0
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""
    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)
    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)
    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)
    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position
    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        # number keys jump directly to the choice with that index
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 434
|
'''simple docstring'''
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}
speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` from `unit_from` to `unit_to`, using km/h as the pivot unit."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
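# Illustrative usage (values assumed, not part of the original file):
# convert_speed(100, "km/h", "m/s") == 27.778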
if __name__ == "__main__":
import doctest
doctest.testmod()
| 434
| 1
|
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Count the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
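# Quick sanity check (illustrative, not part of the original file):
# hamming_distance("karolin", "kathrin") == 3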
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
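# Gauss's Easter algorithm: locate the Paschal Full Moon from the year's position in the
# 19-year Metonic cycle (with century corrections), then step forward to the next Sunday.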
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 7
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
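# The map below lists the canonical DistilBERT checkpoints and the configuration files they resolve to.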
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__(self, vocab_size=3_0522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 249
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class lowerCamelCase :
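    # Helper that builds a tiny ASTConfig and dummy spectrogram inputs for the model tests below.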
def __init__( self : Tuple , __snake_case : Any , __snake_case : List[str]=13 , __snake_case : Dict=2 , __snake_case : Dict=24 , __snake_case : Optional[int]=16 , __snake_case : Optional[Any]=True , __snake_case : Any=True , __snake_case : Dict=32 , __snake_case : Optional[Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : Optional[Any]=37 , __snake_case : str="gelu" , __snake_case : Dict=0.1 , __snake_case : int=0.1 , __snake_case : Optional[int]=10 , __snake_case : str=0.02 , __snake_case : Any=None , __snake_case : Optional[Any]=2 , __snake_case : Optional[Any]=2 , ) -> Optional[Any]:
_a : str = parent
_a : Dict = batch_size
_a : Union[str, Any] = patch_size
_a : int = max_length
_a : Dict = num_mel_bins
_a : str = is_training
_a : Optional[Any] = use_labels
_a : List[Any] = hidden_size
_a : Union[str, Any] = num_hidden_layers
_a : Any = num_attention_heads
_a : List[Any] = intermediate_size
_a : Any = hidden_act
_a : Union[str, Any] = hidden_dropout_prob
_a : Tuple = attention_probs_dropout_prob
_a : Any = type_sequence_label_size
_a : Any = initializer_range
_a : Optional[Any] = scope
_a : List[Any] = frequency_stride
_a : Tuple = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_a : int = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_a : Dict = (self.max_length - self.patch_size) // self.time_stride + 1
_a : str = frequency_out_dimension * time_out_dimension
_a : Union[str, Any] = num_patches + 2
def snake_case_ ( self : List[str] ) -> List[Any]:
_a : str = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Optional[int] = self.get_config()
return config, input_values, labels
def snake_case_ ( self : Dict ) -> Optional[int]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def snake_case_ ( self : Dict , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> Tuple:
_a : Any = ASTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : Any = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : str = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase : Union[str, Any] = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase : Tuple = False
UpperCAmelCase : str = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : int = False
def snake_case_ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Optional[int] ) -> List[Any]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def snake_case_ ( self : List[str] ) -> List[Any]:
_a : str = ASTModelTester(self )
_a : str = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case_ ( self : int ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''' )
def snake_case_ ( self : Optional[int] ) -> Optional[int]:
pass
def snake_case_ ( self : Any ) -> int:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def snake_case_ ( self : Optional[Any] ) -> Optional[int]:
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(__snake_case )
_a : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : str = [*signature.parameters.keys()]
_a : Union[str, Any] = ['''input_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case_ ( self : List[Any] ) -> Optional[Any]:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@slow
def snake_case_ ( self : Any ) -> List[str]:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Tuple = ASTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def snake_case_ ( self : List[Any] ) -> List[str]:
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''' )
if is_torchaudio_available()
else None
)
@slow
def snake_case_ ( self : Union[str, Any] ) -> Dict:
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        feature_extractor = self.default_feature_extractor
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 249
| 1
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
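# The tests below compare fixed-seed model outputs against precomputed reference slices.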
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = PriorTransformer
_UpperCAmelCase = """hidden_states"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 4
SCREAMING_SNAKE_CASE_ : int = 8
SCREAMING_SNAKE_CASE_ : Optional[Any] = 7
SCREAMING_SNAKE_CASE_ : List[str] = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase__ ( self , lowerCAmelCase__=0 ):
"""simple docstring"""
torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
SCREAMING_SNAKE_CASE_ : Tuple = 8
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : str = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (4, 8)
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (4, 8)
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'num_attention_heads': 2,
'attention_head_dim': 4,
'num_layers': 2,
'embedding_dim': 8,
'num_embeddings': 7,
'additional_embeddings': 4,
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = PriorTransformer.from_pretrained(
'hf-internal-testing/prior-dummy' , output_loading_info=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['hidden_states', 'timestep']
self.assertListEqual(arg_names[:2] , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy' )
SCREAMING_SNAKE_CASE_ : Tuple = model.to(lowerCAmelCase__ )
if hasattr(lowerCAmelCase__ , 'set_default_attn_processor' ):
model.set_default_attn_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_seed_input()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = model(**lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE_ : List[str] = output[0, :5].flatten().cpu()
print(lowerCAmelCase__ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-2 ) )
@slow
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self , lowerCAmelCase__=1 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=7_7 , lowerCAmelCase__=0 ):
"""simple docstring"""
torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : List[str] = embedding_dim
SCREAMING_SNAKE_CASE_ : Dict = num_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase__ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[1_3, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[3_7, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior' , subfolder='prior' )
model.to(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_dummy_seed_input(seed=lowerCAmelCase__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**lowerCAmelCase__ )[0]
assert list(sample.shape ) == [1, 7_6_8]
SCREAMING_SNAKE_CASE_ : Optional[Any] = sample[0, :8].flatten().cpu()
print(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(lowerCAmelCase__ )
assert torch_all_close(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
| 101
|
'''simple docstring'''
def hamming(n_element: int) -> list:
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    # i, j, k index the last elements whose 2x, 3x and 5x multiples were used
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
| 356
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase__ ( a_ : str , a_ : list[str] | None = None , a_ : dict[str, float] | None = None , a_ : bool = False , ) -> tuple[int, float, str]:
UpperCAmelCase__ : List[Any] = cipher_alphabet or [chr(a_ ) for i in range(9_7 , 1_2_3 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
UpperCAmelCase__ : Tuple = {
'''a''': 0.08_497,
'''b''': 0.01_492,
'''c''': 0.02_202,
'''d''': 0.04_253,
'''e''': 0.11_162,
'''f''': 0.02_228,
'''g''': 0.02_015,
'''h''': 0.06_094,
'''i''': 0.07_546,
'''j''': 0.00_153,
'''k''': 0.01_292,
'''l''': 0.04_025,
'''m''': 0.02_406,
'''n''': 0.06_749,
'''o''': 0.07_507,
'''p''': 0.01_929,
'''q''': 0.00_095,
'''r''': 0.07_587,
'''s''': 0.06_327,
'''t''': 0.09_356,
'''u''': 0.02_758,
'''v''': 0.00_978,
'''w''': 0.02_560,
'''x''': 0.00_150,
'''y''': 0.01_994,
'''z''': 0.00_077,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
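# Illustrative arithmetic for the chi-squared term above (numbers invented for
# the example): if 'e' occurs 5 times in a candidate decryption, then
# expected = 0.11162 * 5 = 0.5581 and the contribution of 'e' is
# ((5 - 0.5581) ** 2) / 0.5581 ~= 35.35; the shift whose decryption minimises
# the summed statistic is returned as the most likely cipher.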
| 599
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
def lowerCAmelCase__ ( a_ : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def lowerCAmelCase__ ( a_ : str = "" ) -> bool:
if len(a_ ) == 0:
return True
UpperCAmelCase__ : int = input_str.replace(''' ''' , '''''' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
UpperCAmelCase__ : dict[str, int] = {}
for character in lower_case_input_str:
UpperCAmelCase__ : Optional[Any] = character_freq_dict.get(a_ , 0 ) + 1
UpperCAmelCase__ : str = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
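# Worked example (sketch) for the two checks above:
#   "momo" -> character counts {m: 2, o: 2} -> zero odd counts      -> True
#   "abc"  -> character counts {a: 1, b: 1, c: 1} -> three odd counts -> False
# A string can be rearranged into a palindrome iff at most one character
# occurs an odd number of times.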
def lowerCAmelCase__ ( a_ : str = "" ) -> None:
print('''\nFor string = ''' , a_ , ''':''' )
print(
'''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(a_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
print(
'''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(a_ ) , '''\ttime =''' , timeit(
'''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 599
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
    GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
        processor = InstructBlipProcessor(image_processor , tokenizer , qformer_tokenizer )
processor.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).tokenizer
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def get_qformer_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).qformer_tokenizer
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
        self.assertIsInstance(processor.qformer_tokenizer , PreTrainedTokenizerFast )
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tokens = tokenizer(input_str , return_token_type_ids=False )
        encoded_tokens_qformer = qformer_tokenizer(input_str , return_token_type_ids=False )
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer , image_processor=image_processor , qformer_tokenizer=qformer_tokenizer )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(
            list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
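# Usage sketch (the checkpoint name is an assumption based on the public
# InstructBLIP releases, not something exercised by the tests above):
# >>> processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b" )
# >>> inputs = processor(images=image , text="What is unusual about this image?" , return_tensors="pt" )
# >>> sorted(inputs.keys() )   # both GPT-2 and Q-Former token ids plus pixel_values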
| 101
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class a__ :
    def __init__( self , prompt = None , choices = [] ):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
def __magic_name__ ( self , _a , _a = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _a )
else:
forceWrite(self.choices[index] , _a )
def __magic_name__ ( self , _a ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(_a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
        self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def __magic_name__ ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def __magic_name__ ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def __magic_name__ ( self ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def __magic_name__ ( self ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_a )] for number in range(10 )] )
def __magic_name__ ( self ):
lowercase : List[str] = int(chr(self.current_selection ) )
lowercase : Union[str, Any] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _a )
else:
return
else:
return
    def run( self , default_choice = 0 ):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        self.position = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(i )
            forceWrite("\n" )
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(choice , "\n" )
                    return choice
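# Usage sketch (illustrative; upstream this class is accelerate's interactive
# bullet menu used by `accelerate config`):
# >>> menu = a__("Pick a mixed precision mode:" , ["no", "fp16", "bf16"] )
# >>> chosen_index = menu.run(default_choice=0 )   # blocks on keyboard input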
| 361
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_bertweet''': ['''BertweetTokenizer''']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 714
|
from __future__ import annotations
from random import random
class __snake_case :
"""simple docstring"""
    def __init__( self : Optional[int] , value : int | None = None ) -> None:
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left : Node | None = None
        self.right : Node | None = None
def __repr__( self : Any ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 )
def __str__( self : str ) -> str:
'''simple docstring'''
        value = str(self.value ) + " "
        left = str(self.left or "" )
        right = str(self.right or "" )
return value + left + right
def split( root , value ):
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value)
            return left, root
        else:
            root.right , right = split(root.right , value)
            return root, right
def merge( left , right ):
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right)
        return left
    else:
        right.left = merge(left , right.left)
        return right
def insert( root , value ):
    node = Node(value)
    left , right = split(root , value)
    return merge(merge(left , node) , right)
def erase( root , value ):
    left , right = split(root , value - 1)
    _ , right = split(right , value)
    return merge(left , right)
def inorder( root ):
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value , end=",")
        inorder(root.right)
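# Worked example (sketch): whatever random priorities are drawn, an in-order
# walk of a treap always prints the keys in sorted order.
# >>> root = None
# >>> for x in [5, 1, 7, 3]:
# ...     root = insert(root, x)
# >>> inorder(root)
# 1,3,5,7,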
def interact_treap( root , args ):
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root , int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main( ):
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root , args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''roberta-prelayernorm'''
    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
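# Usage sketch (assumption: the defaults above mirror the upstream
# roberta-prelayernorm checkpoints referenced at the top of this file):
# >>> config = RobertaPreLayerNormConfig()
# >>> (config.hidden_size, config.num_hidden_layers, config.num_attention_heads)
# (768, 12, 12)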
| 424
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
snake_case = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
snake_case = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
snake_case = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions , references , min_len = 1 , max_len = 4 , ):
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 424
| 1
|
'''simple docstring'''
def solution ():
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = ''''''.join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
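# Worked check (well-known result): the digits of Champernowne's constant at
# positions 1, 10, 100, 1000, 10000, 100000 and 1000000 are 1, 1, 5, 3, 7, 2, 1,
# so solution() returns 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210 (Project Euler 40).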
if __name__ == "__main__":
print(solution())
| 358
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEPipeline
    params = ["""prompt"""]
    batch_params = ["""prompt"""]
    required_optional_params = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size( self : Optional[Any] ):
        '''simple docstring'''
        return 32
    @property
    def time_input_dim( self : List[Any] ):
        '''simple docstring'''
        return 32
    @property
    def time_embed_dim( self : Union[str, Any] ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def renderer_dim( self : Dict ):
        '''simple docstring'''
        return 8
    @property
    def dummy_tokenizer( self : Optional[Any] ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    @property
    def dummy_text_encoder( self : Union[str, Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
    @property
    def dummy_prior( self : Union[str, Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
        model = PriorTransformer(**model_kwargs )
return model
    @property
    def dummy_renderer( self : List[Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
return model
    def get_dummy_components( self : Tuple ):
        '''simple docstring'''
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
    def get_dummy_inputs( self : Dict , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
    def test_shap_e( self : str ):
        '''simple docstring'''
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_consistent( self : int ):
        '''simple docstring'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self : Dict ):
        '''simple docstring'''
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self : Any ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    def tearDown( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e( self : int ):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            '''a shark''' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
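# Usage sketch (assumption: mirrors the public diffusers API exercised by the
# slow test above; export_to_gif lives in diffusers.utils):
# >>> pipe = ShapEPipeline.from_pretrained('''openai/shap-e''' ).to('''cuda''' )
# >>> frames = pipe('''a shark''' , guidance_scale=15.0 , frame_size=64 , output_type='''pil''' ).images[0]
# >>> from diffusers.utils import export_to_gif
# >>> export_to_gif(frames , '''shark_3d.gif''' )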
| 358
| 1
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_lowerCAmelCase = NewType("DataClass", Any)
_lowerCAmelCase = NewType("DataClassType", Any)
def string_to_bool( v ) -> Dict:
    '''simple docstring'''
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def make_choice_type_function( choices ) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg : str_to_choice.get(arg , arg )
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['''aliases'''] = aliases
    if help is not None:
        metadata['''help'''] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser( ArgumentParser ):
    dataclass_types : Iterable[DataClassType]
    def __init__( self : Tuple , dataclass_types : Union[DataClassType, Iterable[DataClassType]] , **kwargs : Tuple ):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs['''formatter_class'''] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser : ArgumentParser , field : dataclasses.Field ):
        field_name = F'''--{field.name}'''
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''' )
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F''' Problem encountered in field \'{field.name}\'.''' )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F'''--no_{field.name}''' , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self : Any , dtype : DataClassType ):
        if hasattr(dtype , '''_argument_group_name''' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
            return (*outputs,)
    def parse_dict( self : List[Any] , args : Dict[str, Any] , allow_extra_keys : bool = False ):
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'''Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}''' )
        return tuple(outputs )
    def parse_json_file( self : Dict , json_file : str , allow_extra_keys : bool = False ):
        with open(Path(json_file ) , encoding='''utf-8''' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self : str , yaml_file : str , allow_extra_keys : bool = False ):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
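# Usage sketch (illustrative; `TrainArgs` below is a hypothetical dataclass,
# not defined anywhere in this module):
# >>> @dataclasses.dataclass
# ... class TrainArgs:
# ...     learning_rate: float = 5e-5
# ...     do_eval: bool = False
# >>> parser = HfArgumentParser([TrainArgs] )
# >>> (train_args,) = parser.parse_args_into_dataclasses(args=['''--learning_rate''', '''3e-5''', '''--do_eval'''] )
# >>> train_args.learning_rate, train_args.do_eval
# (3e-05, True)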
| 432
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0B10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
    def __init__( self : List[Any] ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('''bits''' , self.watermark )
    def apply_watermark( self : Optional[Any] , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , '''dwtDct''' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
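# Usage sketch (assumption: `images` is a decoded image batch in [-1, 1] with
# shape (batch, channels, height, width), as produced by an SD-XL VAE):
# >>> watermarker = StableDiffusionXLWatermarker()
# >>> watermarked = watermarker.apply_watermark(images )   # no-op below 256 px wide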
| 432
| 1
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__snake_case = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
__snake_case = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
__snake_case = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy( preds , labels ) -> List[str]:
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ) -> Tuple:
    '''simple docstring'''
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman( preds , labels ) -> Union[str, Any]:
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
    def _info( self : List[str] ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute( self : List[Any] , predictions : List[Any] , references : Tuple ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 701
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput( BaseOutput ):
    sample : torch.FloatTensor
class UNetaDModel( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self : List[Any] , sample_size : int = 65536 , sample_rate : Optional[int] = None , in_channels : int = 2 , out_channels : int = 2 , extra_in_channels : int = 0 , time_embedding_type : str = "fourier" , flip_sin_to_cos : bool = True , use_timestep_embedding : bool = False , freq_shift : float = 0.0 , down_block_types : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type : Tuple[str] = "UNetMidBlock1D" , out_block_type : str = None , block_out_channels : Tuple[int] = (32, 32, 64) , act_fn : str = None , norm_num_groups : int = 8 , layers_per_block : int = 1 , downsample_each_block : bool = False , ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
def A_ ( self : List[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[torch.Tensor, float, int] , UpperCAmelCase_ : bool = True , ):
SCREAMING_SNAKE_CASE__ = timestep
if not torch.is_tensor(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ = timesteps[None].to(sample.device )
SCREAMING_SNAKE_CASE__ = self.time_proj(UpperCAmelCase_ )
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE__ = self.time_mlp(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = timestep_embed[..., None]
SCREAMING_SNAKE_CASE__ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
SCREAMING_SNAKE_CASE__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
SCREAMING_SNAKE_CASE__ = ()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = downsample_block(hidden_states=UpperCAmelCase_ , temb=UpperCAmelCase_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE__ = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
SCREAMING_SNAKE_CASE__ = down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE__ = down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE__ = upsample_block(UpperCAmelCase_ , res_hidden_states_tuple=UpperCAmelCase_ , temb=UpperCAmelCase_ )
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE__ = self.out_block(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
return (sample,)
return UNet1DOutput(sample=UpperCAmelCase_ )
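# Hedged usage sketch: the constructor and forward above mirror diffusers' UNet1DModel,
# so a call would look roughly like the lines below. The argument values are
# illustrative assumptions, not settings taken from this file.
#
# import torch
# from diffusers import UNet1DModel
#
# model = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
# sample = torch.randn(1, 2, 65536)      # (batch, channels, length)
# timestep = torch.tensor([10])
# out = model(sample, timestep).sample   # same shape as `sample`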
| 400
| 0
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase:
"""simple docstring"""
@staticmethod
def __a ( *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
pass
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> str:
lowercase__ : Optional[Any] = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def snake_case_ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
lowercase__ : Dict = np.array(__lowerCamelCase )
lowercase__ : List[str] = npimg.shape
return {"hash": hashimage(__lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
a : List[Any] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
a : Optional[int] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = MaskGenerationPipeline(model=snake_case_ , image_processor=snake_case_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __a ( self , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def __a ( self ) -> List[str]:
"""simple docstring"""
pass
@slow
@require_torch
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : Dict = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
lowercase__ : str = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=256 )
# Shortening by hashing
lowercase__ : Any = []
for i, o in enumerate(outputs["masks"] ):
new_output += [{"mask": mask_to_test_readable(snake_case_ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def __a ( self ) -> Tuple:
"""simple docstring"""
lowercase__ : Tuple = "facebook/sam-vit-huge"
lowercase__ : Optional[int] = pipeline("mask-generation" , model=snake_case_ )
lowercase__ : str = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
lowercase__ : Optional[Any] = []
for i, o in enumerate(outputs["masks"] ):
new_output += [{"mask": mask_to_test_readable(snake_case_ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(snake_case_ , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.00_53},
] , )
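# Hedged usage sketch of the pipeline exercised by the tests above; the model name and
# points_per_batch come from the tests, everything else is illustrative.
#
# from transformers import pipeline
#
# segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
# outputs = segmenter(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     points_per_batch=256,
# )
# # outputs["masks"] holds the predicted masks, outputs["scores"] their IoU scores.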
| 397
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __a :
@staticmethod
def UpperCamelCase ( *snake_case_ : Any , **snake_case_ : str)-> int:
pass
@is_pipeline_test
@require_vision
class __a ( unittest.TestCase ):
@require_torch
def UpperCamelCase ( self : Dict)-> List[str]:
__lowerCAmelCase =pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , )
__lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
__lowerCAmelCase =image_classifier(snake_case_ , candidate_labels=["""a""", """b""", """c"""])
# The floating-point scores are so close that we run into floating-point error, so the
# order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(snake_case_) , [
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}],
[{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """c"""}, {"""score""": 0.3_3_3, """label""": """b"""}],
] , )
__lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2)
self.assertEqual(
nested_simplify(snake_case_) , [
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
] , )
@require_tf
def UpperCamelCase ( self : Dict)-> Optional[Any]:
__lowerCAmelCase =pipeline(
model="""hf-internal-testing/tiny-random-clip-zero-shot-image-classification""" , framework="""tf""")
__lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
__lowerCAmelCase =image_classifier(snake_case_ , candidate_labels=["""a""", """b""", """c"""])
self.assertEqual(
nested_simplify(snake_case_) , [{"""score""": 0.3_3_3, """label""": """a"""}, {"""score""": 0.3_3_3, """label""": """b"""}, {"""score""": 0.3_3_3, """label""": """c"""}] , )
__lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=["""A""", """B""", """C"""] , batch_size=2)
self.assertEqual(
nested_simplify(snake_case_) , [
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
[
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
{"""score""": 0.3_3_3, """label""": ANY(snake_case_)},
],
] , )
@slow
@require_torch
def UpperCamelCase ( self : Any)-> Dict:
__lowerCAmelCase =pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , )
# This is an image of 2 cats with remotes and no planes
__lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
__lowerCAmelCase =image_classifier(snake_case_ , candidate_labels=["""cat""", """plane""", """remote"""])
self.assertEqual(
nested_simplify(snake_case_) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
__lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2)
self.assertEqual(
nested_simplify(snake_case_) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
@slow
@require_tf
def UpperCamelCase ( self : Optional[int])-> int:
__lowerCAmelCase =pipeline(
task="""zero-shot-image-classification""" , model="""openai/clip-vit-base-patch32""" , framework="""tf""")
# This is an image of 2 cats with remotes and no planes
__lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
__lowerCAmelCase =image_classifier(snake_case_ , candidate_labels=["""cat""", """plane""", """remote"""])
self.assertEqual(
nested_simplify(snake_case_) , [
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
] , )
__lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=["""cat""", """plane""", """remote"""] , batch_size=2)
self.assertEqual(
nested_simplify(snake_case_) , [
[
{"""score""": 0.5_1_1, """label""": """remote"""},
{"""score""": 0.4_8_5, """label""": """cat"""},
{"""score""": 0.0_0_4, """label""": """plane"""},
],
]
* 5 , )
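# Hedged usage sketch matching the slow tests above; the model name and candidate
# labels are taken from the tests, the rest is illustrative.
#
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# preds = classifier(
#     "./tests/fixtures/tests_samples/COCO/000000039769.png",
#     candidate_labels=["cat", "plane", "remote"],
# )
# # preds is a list of {"score": float, "label": str} dicts, sorted by score.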
| 354
| 0
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = [
("bert.bert", "visual_bert"),
("bert.cls", "cls"),
("bert.classifier", "cls"),
("token_type_embeddings_visual", "visual_token_type_embeddings"),
("position_embeddings_visual", "visual_position_embeddings"),
("projection", "visual_projection"),
]
UpperCamelCase_ = [
"nlvr2_coco_pre_trained.th",
"nlvr2_fine_tuned.th",
"nlvr2_pre_trained.th",
"vcr_coco_pre_train.th",
"vcr_fine_tune.th",
"vcr_pre_train.th",
"vqa_coco_pre_trained.th",
"vqa_fine_tuned.th",
"vqa_pre_trained.th",
]
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = torch.load(__UpperCamelCase ,map_location='cpu' )
return sd
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: str ,__UpperCamelCase: int=rename_keys_prefix ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = OrderedDict()
SCREAMING_SNAKE_CASE : int = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
SCREAMING_SNAKE_CASE : Any = key
for name_pair in rename_keys_prefix:
SCREAMING_SNAKE_CASE : str = new_key.replace(name_pair[0] ,name_pair[1] )
SCREAMING_SNAKE_CASE : List[str] = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it was added separately
SCREAMING_SNAKE_CASE : Union[str, Any] = new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def lowercase__( __UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
assert (
checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE : str = 'pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Dict = {'visual_embedding_dim': 5_12}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : Any = {'visual_embedding_dim': 20_48}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {'visual_embedding_dim': 20_48}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {'visual_embedding_dim': 10_24}
else:
raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Union[str, Any] = {'visual_embedding_dim': 5_12}
SCREAMING_SNAKE_CASE : Union[str, Any] = 'multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE : int = {'visual_embedding_dim': 20_48}
SCREAMING_SNAKE_CASE : Union[str, Any] = 'vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE : List[Any] = {'visual_embedding_dim': 20_48, 'num_labels': 31_29}
SCREAMING_SNAKE_CASE : Dict = 'vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE : Tuple = {
'visual_embedding_dim': 10_24,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE : Dict = 'nlvr'
SCREAMING_SNAKE_CASE : List[Any] = VisualBertConfig(**__UpperCamelCase )
# Load State Dict
SCREAMING_SNAKE_CASE : List[str] = load_state_dict(__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = get_new_dict(__UpperCamelCase ,__UpperCamelCase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE : Union[str, Any] = VisualBertForPreTraining(__UpperCamelCase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE : Optional[int] = VisualBertForQuestionAnswering(__UpperCamelCase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE : List[Any] = VisualBertForVisualReasoning(__UpperCamelCase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE : str = VisualBertForMultipleChoice(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
# Save Checkpoints
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
UpperCamelCase_ = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
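# Hedged invocation sketch for the converter above. The script filename is an
# assumption; the checkpoint name must be one of ACCEPTABLE_CHECKPOINTS and the
# output directory is illustrative.
#
# python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#     vqa_fine_tuned.th ./visual_bert_vqa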
| 508
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _a :
'''simple docstring'''
@staticmethod
def UpperCamelCase_ ( *A, **A ):
'''simple docstring'''
pass
def lowercase__( __UpperCamelCase: Image ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def lowercase__( __UpperCamelCase: Image ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = np.array(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = npimg.shape
return {"hash": hashimage(__UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _a ( unittest.TestCase ):
'''simple docstring'''
A : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
A : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = MaskGenerationPipeline(model=A, image_processor=A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@slow
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = pipeline('mask-generation', model='facebook/sam-vit-huge' )
SCREAMING_SNAKE_CASE : Union[str, Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256 )
# Shortening by hashing
SCREAMING_SNAKE_CASE : Any = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(A, decimals=4 ), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
], )
# fmt: on
@require_torch
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'facebook/sam-vit-huge'
SCREAMING_SNAKE_CASE : int = pipeline('mask-generation', model=A )
SCREAMING_SNAKE_CASE : Optional[Any] = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg', pred_iou_thresh=1, points_per_batch=256 )
# Shortening by hashing
SCREAMING_SNAKE_CASE : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(A, decimals=4 ), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
], )
| 508
| 1
|
from datetime import datetime
import requests
def lowercase_ (A : str ):
snake_case__ : Dict = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
snake_case__ : Any = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(A ).content
if __name__ == "__main__":
a_ :Any = input("Enter Video/IGTV url: ").strip()
a_ :Union[str, Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, "wb") as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 478
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ :List[Any] = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
a_ :int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 478
| 1
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class a_ :
@staticmethod
def lowerCAmelCase__ ( *UpperCAmelCase , **UpperCAmelCase ):
pass
def UpperCamelCase_ ( A__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowercase__ =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
lowerCamelCase__ : int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ : Any = pipeline(
"""document-question-answering""" , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
a_ : str = INVOICE_URL
a_ : Dict = list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , """""" ) ) )
a_ : Optional[int] = """What is the placebo?"""
a_ : Dict = [
{
"""image""": load_image(UpperCAmelCase ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
a_ : Tuple = dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{"""score""": ANY(UpperCAmelCase ), """answer""": ANY(UpperCAmelCase ), """start""": ANY(UpperCAmelCase ), """end""": ANY(UpperCAmelCase )},
{"""score""": ANY(UpperCAmelCase ), """answer""": ANY(UpperCAmelCase ), """start""": ANY(UpperCAmelCase ), """end""": ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectron2
@require_pytesseract
def lowerCAmelCase__ ( self ):
a_ : str = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
a_ : Dict = INVOICE_URL
a_ : int = """How many cats are there?"""
a_ : Union[str, Any] = [
{"""score""": 0.00_01, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.00_01, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
a_ : List[str] = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
a_ : Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
# No text is detected in this image, so layoutlmv2 should fail,
# probably with an empty answer.
a_ : int = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
a_ : Optional[Any] = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
# We can optionally pass the words and bounding boxes directly
a_ : List[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
a_ : Optional[int] = []
a_ : Union[str, Any] = []
a_ : Optional[Any] = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def lowerCAmelCase__ ( self ):
a_ : Tuple = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
a_ : List[str] = INVOICE_URL
a_ : Union[str, Any] = """What is the invoice number?"""
a_ : Optional[int] = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
a_ : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
a_ : List[str] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.99_44, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.00_09, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def lowerCAmelCase__ ( self ):
a_ : Optional[int] = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
a_ : str = INVOICE_URL
a_ : str = """What is the invoice number?"""
a_ : int = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
a_ : List[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
a_ : Optional[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.99_74, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.99_48, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase__ ( self ):
a_ : Tuple = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCAmelCase )
a_ : Union[str, Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCAmelCase , revision="""3dc6de3""" , )
a_ : Tuple = INVOICE_URL
a_ : Union[str, Any] = """What is the invoice number?"""
a_ : str = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
a_ : Dict = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
a_ : List[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
a_ : List[Any] = list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , """""" ) ) )
# This model should also work if `image` is set to None
a_ : str = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.42_51, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.08_19, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowerCAmelCase__ ( self ):
a_ : List[str] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=UpperCAmelCase )
a_ : Tuple = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=UpperCAmelCase , revision="""3dc6de3""" , max_seq_len=50 , )
a_ : Tuple = INVOICE_URL
a_ : Dict = """What is the invoice number?"""
a_ : Any = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
a_ : List[Any] = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
a_ : Tuple = list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , """""" ) ) )
# This model should also work if `image` is set to None
a_ : List[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{"""score""": 0.99_99, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.99_98, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def lowerCAmelCase__ ( self ):
a_ : Optional[int] = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
a_ : Union[str, Any] = INVOICE_URL
a_ : str = """What is the invoice number?"""
a_ : Optional[Any] = dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def lowerCAmelCase__ ( self ):
pass
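# Hedged usage sketch of the pipeline under test; the model and revision come from
# the slow tests above (which also pass a custom tokenizer, omitted here for brevity).
#
# from transformers import pipeline
#
# dqa = pipeline(
#     "document-question-answering",
#     model="impira/layoutlm-document-qa",
#     revision="3dc6de3",
# )
# answers = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
# # each answer is {"score": float, "answer": str, "start": int, "end": int}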
| 706
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowercase__ =logging.getLogger(__name__)
@dataclass
class a_ :
lowerCamelCase__ : str
lowerCamelCase__ : List[str]
lowerCamelCase__ : Optional[List[str]]
@dataclass
class a_ :
lowerCamelCase__ : List[int]
lowerCamelCase__ : List[int]
lowerCamelCase__ : Optional[List[int]] = None
lowerCamelCase__ : Optional[List[int]] = None
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : Any = 'train'
lowerCamelCase__ : Optional[int] = 'dev'
lowerCamelCase__ : int = 'test'
class a_ :
@staticmethod
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
raise NotImplementedError
@staticmethod
def lowerCAmelCase__ ( UpperCAmelCase ):
raise NotImplementedError
@staticmethod
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase="[CLS]" , UpperCAmelCase=1 , UpperCAmelCase="[SEP]" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=0 , UpperCAmelCase=0 , UpperCAmelCase=-1_00 , UpperCAmelCase=0 , UpperCAmelCase=True , ):
a_ = {label: i for i, label in enumerate(UpperCAmelCase )}
a_ = []
for ex_index, example in enumerate(UpperCAmelCase ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" , UpperCAmelCase , len(UpperCAmelCase ) )
a_ = []
a_ = []
for word, label in zip(example.words , example.labels ):
a_ = tokenizer.tokenize(UpperCAmelCase )
# bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(UpperCAmelCase ) > 0:
tokens.extend(UpperCAmelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(UpperCAmelCase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
a_ = tokenizer.num_special_tokens_to_add()
if len(UpperCAmelCase ) > max_seq_length - special_tokens_count:
a_ = tokens[: (max_seq_length - special_tokens_count)]
a_ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
a_ = [sequence_a_segment_id] * len(UpperCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
a_ = [cls_token] + tokens
a_ = [pad_token_label_id] + label_ids
a_ = [cls_token_segment_id] + segment_ids
a_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
a_ = [1 if mask_padding_with_zero else 0] * len(UpperCAmelCase )
# Zero-pad up to the sequence length.
a_ = max_seq_length - len(UpperCAmelCase )
if pad_on_left:
a_ = ([pad_token] * padding_length) + input_ids
a_ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
a_ = ([pad_token_segment_id] * padding_length) + segment_ids
a_ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(UpperCAmelCase ) == max_seq_length
assert len(UpperCAmelCase ) == max_seq_length
assert len(UpperCAmelCase ) == max_seq_length
assert len(UpperCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(UpperCAmelCase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(UpperCAmelCase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(UpperCAmelCase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(UpperCAmelCase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(UpperCAmelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
a_ = None
features.append(
InputFeatures(
input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , label_ids=UpperCAmelCase ) )
return features
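# Worked example of the labeling scheme above (hedged: assumes max_seq_length=8,
# label_map={"O": 0}, pad_token_label_id=-100, and that "Hello" tokenizes to
# ["He", "##llo"]):
#
#   tokens    : [CLS]  He  ##llo  world  [SEP]  <pad>  <pad>  <pad>
#   label_ids : -100    0   -100      0   -100   -100   -100   -100
#
# Only the first sub-token of each word keeps the real label id; special tokens
# and padding all receive pad_token_label_id so they are ignored by the loss.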
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class a_ ( UpperCamelCase__ ):
lowerCamelCase__ : List[InputFeatures]
lowerCamelCase__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase=False , UpperCAmelCase = Split.train , ):
# Load data features from cache or dataset file
a_ = os.path.join(
UpperCAmelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(UpperCAmelCase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a_ = cached_features_file + """.lock"""
with FileLock(UpperCAmelCase ):
if os.path.exists(UpperCAmelCase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
a_ = torch.load(UpperCAmelCase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
a_ = token_classification_task.read_examples_from_file(UpperCAmelCase , UpperCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
a_ = token_classification_task.convert_examples_to_features(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''' )
torch.save(self.features , UpperCAmelCase )
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCAmelCase ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class a_ :
lowerCamelCase__ : List[InputFeatures]
lowerCamelCase__ : int = -100
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase=False , UpperCAmelCase = Split.train , ):
a_ = token_classification_task.read_examples_from_file(UpperCAmelCase , UpperCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
a_ = token_classification_task.convert_examples_to_features(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
a_ = tf.data.Dataset.from_generator(
UpperCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
a_ = tf.data.Dataset.from_generator(
UpperCAmelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowerCAmelCase__ ( self ):
a_ = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , UpperCAmelCase ):
return self.features[i]
| 511
| 0
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int:
__lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6
__lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
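# Quick sanity check (hedged, using the canonical closed forms above): for n = 10 the
# sum of squares is 385 and the square of the sum is 3025, so solution(10) == 2640.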
| 13
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
snake_case__ : Optional[int] = ['''text''', '''image''', '''audio''']
def lowercase ( _lowerCAmelCase ):
UpperCAmelCase__ = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
inputs.append(create_inputs(_lowerCAmelCase ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def lowercase ( _lowerCAmelCase ):
UpperCAmelCase__ = []
for output in outputs:
if isinstance(_lowerCAmelCase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(_lowerCAmelCase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(_lowerCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class snake_case :
'''simple docstring'''
def UpperCAmelCase ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
UpperCAmelCase__ = self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCamelCase_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
UpperCAmelCase__ = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase ( self : List[Any] ) ->Tuple:
'''simple docstring'''
UpperCAmelCase__ = create_inputs(self.tool.inputs )
UpperCAmelCase__ = self.tool(*lowerCamelCase_ )
# There is a single output
if len(self.tool.outputs ) == 1:
UpperCAmelCase__ = [outputs]
self.assertListEqual(output_types(lowerCamelCase_ ) , self.tool.outputs )
def UpperCAmelCase ( self : Tuple ) ->Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def UpperCAmelCase ( self : List[Any] ) ->str:
'''simple docstring'''
UpperCAmelCase__ = create_inputs(self.tool.inputs )
UpperCAmelCase__ = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ = [outputs]
self.assertEqual(len(lowerCamelCase_ ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCamelCase_ , self.tool.outputs ):
UpperCAmelCase__ = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCamelCase_ , lowerCamelCase_ ) )
def UpperCAmelCase ( self : List[str] ) ->str:
'''simple docstring'''
UpperCAmelCase__ = create_inputs(self.tool.inputs )
UpperCAmelCase__ = []
for _input, input_type in zip(lowerCamelCase_ , self.tool.inputs ):
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
UpperCAmelCase__ = self.tool(*lowerCamelCase_ )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ = [outputs]
self.assertEqual(len(lowerCamelCase_ ) , len(self.tool.outputs ) )
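# Hedged sketch of a minimal tool this harness could exercise. The class and the
# checkpoint string are hypothetical; only the attribute names mirror the asserts above.
#
# class EchoTextTool:
#     description = "This is a tool that returns its text input unchanged."
#     default_checkpoint = "hf-internal-testing/dummy-checkpoint"  # hypothetical
#     inputs = ["text"]
#     outputs = ["text"]
#
#     def __call__(self, text):
#         return text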
| 392
| 0
|
from __future__ import annotations
from typing import Any
class UpperCamelCase ( lowercase__ ):
'''simple docstring'''
pass
class UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCamelCase_ ):
lowercase_ :Any = data
lowercase_ :Node | None = None
def __iter__( self ):
lowercase_ :Dict = self
lowercase_ :Dict = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(UpperCamelCase_ )
yield node.data
lowercase_ :int = node.next_node
@property
def UpperCamelCase ( self ):
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[int] = Node(1)
SCREAMING_SNAKE_CASE : List[Any] = Node(2)
SCREAMING_SNAKE_CASE : List[Any] = Node(3)
SCREAMING_SNAKE_CASE : int = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE : List[str] = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE : List[Any] = Node(5)
SCREAMING_SNAKE_CASE : Optional[Any] = Node(6)
SCREAMING_SNAKE_CASE : Optional[int] = Node(5)
SCREAMING_SNAKE_CASE : Tuple = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE : Optional[int] = Node(1)
print(root_node.has_loop) # False
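# For contrast, a hedged sketch of Floyd's tortoise-and-hare detection, which finds a
# loop in O(1) extra space instead of the visited-list approach used above.
#
# def has_loop(head):
#     slow = fast = head
#     while fast is not None and fast.next_node is not None:
#         slow = slow.next_node            # one step
#         fast = fast.next_node.next_node  # two steps
#         if slow is fast:
#             return True                  # pointers met inside a cycle
#     return False                         # fast ran off the end: no cycle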
| 441
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self ):
lowercase_ :Dict = 1
lowercase_ :Optional[Any] = 3
lowercase_ :Optional[int] = (32, 32)
lowercase_ :Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self ):
torch.manual_seed(0 )
lowercase_ :str = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase_ )
@property
def UpperCamelCase ( self ):
def extract(*UpperCamelCase_ , **UpperCamelCase_ ):
class UpperCamelCase :
'''simple docstring'''
def __init__( self ):
lowercase_ :List[str] = torch.ones([0] )
def UpperCamelCase ( self , UpperCamelCase_ ):
self.pixel_values.to(UpperCamelCase_ )
return self
return Out()
return extract
def UpperCamelCase ( self ):
lowercase_ :List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :Dict = self.dummy_cond_unet
lowercase_ :Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Union[str, Any] = self.dummy_vae
lowercase_ :Any = self.dummy_text_encoder
lowercase_ :List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ :List[str] = 77
lowercase_ :int = self.dummy_image.to(UpperCamelCase_ )
lowercase_ :Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ :str = AltDiffusionImg2ImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
lowercase_ :int = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Any = '''A painting of a squirrel eating a burger'''
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = alt_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , )
lowercase_ :Dict = output.images
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = alt_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Optional[int] = image[0, -3:, -3:, -1]
lowercase_ :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ :int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.dummy_cond_unet
lowercase_ :Dict = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Tuple = self.dummy_vae
lowercase_ :Dict = self.dummy_text_encoder
lowercase_ :Tuple = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ :str = 77
lowercase_ :str = self.dummy_image.to(UpperCamelCase_ )
# put models in fp16
lowercase_ :Union[str, Any] = unet.half()
lowercase_ :Union[str, Any] = vae.half()
lowercase_ :List[str] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ :List[Any] = AltDiffusionImg2ImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
lowercase_ :List[str] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :Union[str, Any] = torch.manual_seed(0 )
lowercase_ :Any = alt_pipe(
[prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ :Optional[Any] = init_image.resize((760, 504) )
lowercase_ :List[str] = '''BAAI/AltDiffusion'''
lowercase_ :Optional[Any] = AltDiffusionImg2ImgPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :Optional[Any] = '''A fantasy landscape, trending on artstation'''
lowercase_ :Optional[Any] = torch.manual_seed(0 )
lowercase_ :str = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :Optional[Any] = output.images[0]
lowercase_ :Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ :Any = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
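
# Minimal img2img usage sketch (illustrative, mirroring the integration test
# above; `init_image` stands for any PIL image you supply):
#
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")
#   pipe.to("cuda")
#   pipe.enable_attention_slicing()
#   out = pipe(prompt="A fantasy landscape, trending on artstation",
#              image=init_image, strength=0.75, guidance_scale=7.5)
#   out.images[0]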
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"both `encoder` and `decoder` sub-configurations must be passed, but only {kwargs} was given."
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
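
# Usage sketch (illustrative; the checkpoint names below are examples, not
# requirements): compose an encoder-decoder config from two sub-configs.
#
#   from transformers import AutoConfig
#
#   encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
#   decoder_config = AutoConfig.from_pretrained("gpt2")
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
#   assert config.is_encoder_decoder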
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell may be visited if it lies inside the grid, is unvisited, and is land.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Depth-first search over the 8 neighbouring cells.
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # row offsets, paired with col_nbr
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
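
# Illustrative self-check (not part of the original): with 8-directional
# connectivity, this 5x5 grid contains five islands.
if __name__ == "__main__":
    example_grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    print(Graph(5, 5, example_grid).count_islands())  # expected: 5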
from __future__ import annotations


def average(nums: list[float]) -> float:
    """Return the arithmetic mean of a non-empty list of numbers.

    >>> average([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
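
# Illustrative helper (an assumption, not part of the original table): since
# the mapping above pairs each bare package name with its full version
# specifier, building a pinned requirements list is a dictionary lookup.
def deps_list(*pkgs: str) -> list[str]:
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]


# e.g. deps_list("numpy", "torch") == ["numpy>=1.17", "torch>=1.9,!=1.12.0"]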
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Shortest-remaining-time-first (SRTF) scheduling: compute waiting times."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("Enter how many processes you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
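
# Non-interactive example (illustrative, not part of the original script): with
# arrival times [0, 1, 2] and burst times [8, 4, 2], SRTF preempts the running
# process whenever a shorter job arrives, giving waiting times [6, 2, 0] and
# turn-around times [14, 6, 2]:
#
#   wt = calculate_waitingtime([0, 1, 2], [8, 4, 2], 3)   # [6, 2, 0]
#   tat = calculate_turnaroundtime([8, 4, 2], 3, wt)      # [14, 6, 2]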
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Knuth-Morris-Pratt."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure (prefix) function of `pattern`."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
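
    # Quick illustration (not in the original tests): the failure array stores,
    # for each prefix of the pattern, the length of the longest proper prefix
    # that is also a suffix; kmp() uses it to resume matching after a mismatch
    # without re-scanning the text.
    #
    #   get_failure_array("ABABX")  # [0, 0, 1, 2, 0]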
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ):
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Optional[Any] = batch_size
_UpperCamelCase : int = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_input_mask
_UpperCamelCase : Union[str, Any] = use_token_type_ids
_UpperCamelCase : Union[str, Any] = use_labels
_UpperCamelCase : Optional[Any] = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Optional[Any] = embedding_size
_UpperCamelCase : str = num_hidden_layers
_UpperCamelCase : str = num_attention_heads
_UpperCamelCase : int = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Tuple = hidden_dropout_prob
_UpperCamelCase : int = attention_probs_dropout_prob
_UpperCamelCase : Tuple = max_position_embeddings
_UpperCamelCase : List[str] = type_vocab_size
_UpperCamelCase : Dict = type_sequence_label_size
_UpperCamelCase : List[str] = initializer_range
_UpperCamelCase : Optional[Any] = num_labels
_UpperCamelCase : Tuple = num_choices
_UpperCamelCase : List[str] = scope
def A__ ( self):
_UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase : Any = None
if self.use_input_mask:
_UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase : Optional[Any] = None
if self.use_token_type_ids:
_UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase : int = None
_UpperCamelCase : List[str] = None
_UpperCamelCase : Dict = None
if self.use_labels:
_UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case)
_UpperCamelCase : Optional[Any] = model(__snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Optional[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[str] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Optional[int] = self.num_labels
_UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : Any = self.num_labels
_UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case):
_UpperCamelCase : List[str] = self.num_choices
_UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case)
model.to(__snake_case)
model.eval()
_UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
_UpperCamelCase : Union[str, Any] = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
a__ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ = True
# test_resize_embeddings = False
a__ = False
def A__ ( self , __snake_case , __snake_case , __snake_case=False):
_UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case)
if return_labels:
if model_class in get_values(__snake_case):
_UpperCamelCase : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case)
_UpperCamelCase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case)
return inputs_dict
def A__ ( self):
_UpperCamelCase : Any = MegatronBertModelTester(self)
_UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case)
def A__ ( self):
_UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case)
def A__ ( self):
_UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case)
def A__ ( self):
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case)
def A__ ( self):
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case)
def A__ ( self):
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case)
def A__ ( self):
_UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.')
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
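
# Behaviour of the helpers under test, in brief (illustrative, inferred from
# the assertions above): truncate_or_pad clips or right-pads a sequence to the
# block size, build_mask zeroes positions equal to the padding id, and
# compute_token_type_ids flips the segment id at every separator token.
#
#   truncate_or_pad([1, 2, 3], 5, 0)  # [1, 2, 3, 0, 0]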
def kth_permutation(k: int, n: int) -> list[int]:
    """
    Find the k'th (0-indexed) lexicographic permutation of 0, 1, ..., n - 1.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    >>> kth_permutation(10, 4)
    [1, 3, 0, 2]
    """
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation: read k as a number in the factorial number system
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def lowercase_ ( self ) -> int:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
_lowercase: List[str] = dset.map(
lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
_lowercase: Dict = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowercase , _lowercase: Union[str, Any] = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
import faiss
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(A_ , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
_lowercase: Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_lowercase: List[Any] = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
_lowercase: List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
_lowercase: int = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=A_ )
_lowercase , _lowercase: Dict = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> Any:
"""simple docstring"""
import faiss
_lowercase: str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_lowercase: List[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: int = 1
_lowercase , _lowercase: Optional[Any] = index.search(A_ )
self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_lowercase: Tuple = np.eye(5 , dtype=np.floataa )[::-1]
_lowercase , _lowercase: str = index.search_batch(A_ )
self.assertRaises(A_ , index.search_batch , queries[0] )
_lowercase: Tuple = [scores[0] for scores in total_scores]
_lowercase: Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , A_ )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
import faiss
_lowercase: Union[str, Any] = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_lowercase: Dict = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(A_ ):
_lowercase: List[str] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
import faiss
_lowercase: Any = faiss.IndexFlat(5 )
_lowercase: List[Any] = FaissIndex(custom_index=A_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase_ ( self ) -> str:
"""simple docstring"""
import faiss
_lowercase: Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
index.save(tmp_file.name )
_lowercase: Optional[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_lowercase: Optional[Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: Union[str, Any] = 1
_lowercase , _lowercase: Tuple = index.search(A_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
import faiss
_lowercase: Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_lowercase: Tuple = '''index.faiss'''
_lowercase: str = f'''mock://{index_name}'''
index.save(_UpperCamelCase , storage_options=mockfs.storage_options )
_lowercase: List[Any] = FaissIndex.load(_UpperCamelCase , storage_options=mockfs.storage_options )
_lowercase: Union[str, Any] = np.zeros(5 , dtype=np.floataa )
_lowercase: Dict = 1
_lowercase , _lowercase: str = index.search(_UpperCamelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def lowercase_ ( self ) -> int:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_lowercase: int = Elasticsearch()
_lowercase: Tuple = {'''acknowledged''': True}
_lowercase: Tuple = ElasticSearchIndex(es_client=A_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
_lowercase: Dict = '''foo'''
_lowercase: Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_lowercase , _lowercase: Optional[Any] = index.search(A_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_lowercase: Optional[int] = '''foo'''
_lowercase: Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_lowercase , _lowercase: List[Any] = index.search(A_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_lowercase: Union[str, Any] = ['''foo''', '''bar''', '''foobar''']
_lowercase: str = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_lowercase , _lowercase: Optional[int] = index.search_batch(A_ )
_lowercase: Any = [scores[0] for scores in total_scores]
_lowercase: List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
# batched queries with timeout
_lowercase: List[str] = ['''foo''', '''bar''', '''foobar''']
_lowercase: Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_lowercase , _lowercase: Optional[int] = index.search_batch(A_ , request_timeout=30 )
_lowercase: Optional[Any] = [scores[0] for scores in total_scores]
_lowercase: Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(A_ ) , 0 )
self.assertListEqual([1, 1, 1] , A_ )
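
# Typical FaissIndex flow exercised by the tests above (illustrative recap):
#
#   index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#   index.add_vectors(np.eye(5, dtype=np.float32))
#   scores, indices = index.search(np.ones(5, dtype=np.float32))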
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)

        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
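
# For reference (illustrative): with mean mu and covariance S of the reference
# distribution, the value returned per row x is the squared Mahalanobis
# distance (x - mu) @ inv(S) @ (x - mu).T, read off the diagonal above.
#
#   metric = datasets.load_metric("mahalanobis")
#   metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
#   # {'mahalanobis': array([0.5])}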
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
def __UpperCAmelCase ( A : List[Any] , A : str , A : Union[str, Any] , A : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = sum(a_i[j] for j in range(A , len(A ) ) )
UpperCAmelCase_ : Union[str, Any] = sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = 0, 0
UpperCAmelCase_ : str = n - i
UpperCAmelCase_ : Union[str, Any] = memo.get(A )
if sub_memo is not None:
UpperCAmelCase_ : Dict = sub_memo.get(A )
if jumps is not None and len(A ) > 0:
# find and make the largest jump without going over
UpperCAmelCase_ : str = -1
for _k in range(len(A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase_ : Any = _k
break
if max_jump >= 0:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase_ : Union[str, Any] = diff + c
for j in range(min(A , len(A ) ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = divmod(A , 1_0 )
if new_c > 0:
add(A , A , A )
else:
UpperCAmelCase_ : Tuple = []
else:
UpperCAmelCase_ : List[str] = {c: []}
UpperCAmelCase_ : List[str] = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = next_term(A , k - 1 , i + dn , A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase_ , UpperCAmelCase_ : int = compute(A , A , i + dn , A )
diff += _diff
dn += terms_jumped
UpperCAmelCase_ : List[str] = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase_ : List[Any] = 0
while j < len(A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A , (diff, dn, k) )
return (diff, dn)
def __UpperCAmelCase ( A : List[str] , A : Dict , A : List[str] , A : Union[str, Any] ) -> str:
if i >= n:
return 0, i
if k > len(A ):
a_i.extend([0 for _ in range(k - len(A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase_ : int = i
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = 0, 0, 0
for j in range(len(A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase_ : Optional[int] = ds_c + ds_b
diff += addend
UpperCAmelCase_ : Any = 0
for j in range(A ):
UpperCAmelCase_ : Dict = a_i[j] + addend
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = divmod(A , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A , A , A )
return diff, i - start_i
def __UpperCAmelCase ( A : Dict , A : Union[str, Any] , A : str ) -> List[Any]:
for j in range(A , len(A ) ):
UpperCAmelCase_ : List[str] = digits[j] + addend
if s >= 1_0:
UpperCAmelCase_ , UpperCAmelCase_ : Any = divmod(A , 1_0 )
UpperCAmelCase_ : str = addend // 1_0 + quotient
else:
UpperCAmelCase_ : Any = s
UpperCAmelCase_ : Any = addend // 1_0
if addend == 0:
break
while addend > 0:
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = divmod(A , 1_0 )
digits.append(A )
def __UpperCAmelCase ( A : int = 1_0**1_5 ) -> int:
UpperCAmelCase_ : Any = [1]
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : List[Any] = 0
while True:
UpperCAmelCase_ , UpperCAmelCase_ : str = next_term(A , 2_0 , i + dn , A )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase_ : Any = 0
for j in range(len(A ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(f'''{solution() = }''')
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the Pythagorean theorem to the impedance triangle
    (impedance^2 = resistance^2 + reactance^2): given two of the three
    quantities (pass 0 for the unknown one), return the missing one.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
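
# Example (illustrative): a 3 ohm resistance with 4 ohm of reactance forms a
# 3-4-5 impedance triangle.
#
#   electrical_impedance(3, 4, 0)  # {'impedance': 5.0}
#   electrical_impedance(0, 4, 5)  # {'resistance': 3.0}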
def count_divisors(n: int) -> int:
    """Count the divisors of n from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangular number with more than 500 divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
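
# Sanity check (illustrative): 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors.
#
#   count_divisors(28)  # 6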
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
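# Illustrative invocation (script path and checkpoint name are examples, not prescriptive):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers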
| 529
|
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope='session' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
return filename
@pytest.fixture(scope='session' )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
return path
@pytest.fixture(scope='session' )
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
return path
@pytest.fixture(scope='session' )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope='session' )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope='session' )
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope='session' )
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
return path
@pytest.fixture(scope='session' )
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
return path
@pytest.fixture(scope='session' )
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope='session' )
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='session' )
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope='session' )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
return path
@pytest.fixture(scope='session' )
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
return path
@pytest.fixture(scope='session' )
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
return path
@pytest.fixture(scope='session' )
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
return path
@pytest.fixture(scope='session' )
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
return path
@pytest.fixture(scope='session' )
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
return path
@pytest.fixture(scope='session' )
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
return path
@pytest.fixture(scope='session' )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
return path
@pytest.fixture(scope='session' )
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
return path
@pytest.fixture(scope='session' )
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
return path
@pytest.fixture(scope='session' )
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope='session' )
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope='session' )
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope='session' )
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
return path
@pytest.fixture(scope='session' )
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope='session' )
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
return path
@pytest.fixture(scope='session' )
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope='session' )
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
return path
@pytest.fixture(scope='session' )
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
return path
@pytest.fixture(scope='session' )
def tar_jsonl_path(tmp_path_factory, jsonl_path, jsonl2_path):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
return path
@pytest.fixture(scope='session' )
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
return path
@pytest.fixture(scope='session' )
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
return path
@pytest.fixture(scope='session' )
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
return path
@pytest.fixture(scope='session' )
def text_path_with_custom_extension(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
return path
@pytest.fixture(scope='session' )
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
return path
@pytest.fixture(scope='session' )
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
return path
@pytest.fixture(scope='session' )
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
return path
@pytest.fixture(scope='session' )
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
return path
@pytest.fixture(scope='session' )
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope='session' )
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope='session' )
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
return path
@pytest.fixture(scope='session' )
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
return data_dir
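# Sketch of how a test would consume these session-scoped fixtures through pytest's
# dependency injection (the test name and assertion are illustrative):
#   def test_csv_load(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.num_rows == 4  # DATA has four rows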
| 246
| 0
|
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
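# Round-trip property of the two mappings above: decryption adds back the key index
# that encryption subtracted (mod 26), so for any uppercase message and key,
#   key_new = generate_key(message, key)
#   original_text(cipher_text(message, key_new), key_new) == message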
| 704
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
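# The @slow test above is skipped by default; it can be run in isolation with
# something like (paths illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/bert/test_modeling_flax_bert.py -k from_pretrained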
| 441
| 0
|
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
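# Minimal shape sanity-check for one of the blocks above (values illustrative; Flax
# convolutions expect NHWC inputs, and `temb` is a time-embedding vector):
#   import jax
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#   sample = jnp.ones((1, 8, 8, 32))
#   temb = jnp.ones((1, 128))
#   params = block.init(jax.random.PRNGKey(0), sample, temb)
#   hidden_states, output_states = block.apply(params, sample, temb)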
| 22
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
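# CANINE tokenizes at the character level: per the integration test above, ids are
# [CLS] = 57344, then raw Unicode code points, then [SEP] = 57345, e.g.
#   tokenizer("hi").input_ids  # -> [57344, 104, 105, 57345]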
| 250
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
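# Typical usage (checkpoint name taken from the pretrained maps above):
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # jieba-based pre-tokenization of Chinese text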
| 283
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
    characters the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
    strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
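# Quick checks for the two helpers above:
#   len(bytes_to_unicode()) == 256  # every byte gets a printable unicode proxy
#   get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}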
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
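# Not part of the original file: a tiny, self-contained check of the two helpers above.
# bytes_to_unicode() must be a bijection over all 256 byte values, and get_pairs()
# enumerates the adjacent symbol pairs that bpe() ranks against self.bpe_ranks.
if __name__ == "__main__":
    byte_map = bytes_to_unicode()
    assert len(byte_map) == 256 and len(set(byte_map.values())) == 256  # bijective
    assert get_pairs(tuple("low")) == {("l", "o"), ("o", "w")}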
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
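# Not part of the original tests: outside a test harness the processor is usually built
# from a published checkpoint (the name below is an assumption) and fed both modalities:
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> keys: pixel_values, input_ids, attention_mask (as asserted above)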
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511")
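# Not part of the original tests: SPIECE_UNDERLINE is the "▁" marker SentencePiece uses to
# encode a preceding space, which is why word-initial pieces in the expectations above carry it:
#
#     tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     tok.tokenize("Hello world")  # -> ["▁Hello", "▁world"] (exact split depends on the vocab)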
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False, )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
assert downloaded_paths
    for downloaded_path, input_url in zip(downloaded_paths, input_urls):
        assert downloaded_path == dl_manager.downloaded_paths[input_url]
        downloaded_path = Path(downloaded_path)
        parts = downloaded_path.parts
        assert parts[-1] == HASH
        assert parts[-2] == cache_subdir
        assert downloaded_path.exists()
        content = downloaded_path.read_text()
        assert content == CONTENT
        metadata_downloaded_path = downloaded_path.with_suffix(".json")
        assert metadata_downloaded_path.exists()
        metadata_content = json.loads(metadata_downloaded_path.read_text())
        assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def __a ( __UpperCAmelCase : Any , __UpperCAmelCase : int , __UpperCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : str = str(_lowerCamelCase )
if issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase_ : Optional[Any] = filename
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase_ : int = [filename]
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase_ : List[str] = {"train": filename}
lowerCamelCase_ : Tuple = "dummy"
lowerCamelCase_ : int = xz_file.parent
lowerCamelCase_ : List[Any] = "extracted"
lowerCamelCase_ : Dict = DownloadConfig(
cache_dir=_lowerCamelCase , use_etag=_lowerCamelCase , )
lowerCamelCase_ : Tuple = DownloadManager(dataset_name=_lowerCamelCase , download_config=_lowerCamelCase )
lowerCamelCase_ : Tuple = dl_manager.extract(_lowerCamelCase )
lowerCamelCase_ : List[Any] = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
    assert extracted_paths
    for extracted_path, input_path in zip(extracted_paths, input_paths):
        assert extracted_path == dl_manager.extracted_paths[input_path]
        extracted_path = Path(extracted_path)
        parts = extracted_path.parts
        assert parts[-1] == hash_url_to_filename(input_path, etag=None)
        assert parts[-2] == extracted_subdir
        assert extracted_path.exists()
        extracted_file_content = extracted_path.read_text()
        expected_file_content = text_file.read_text()
        assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def __a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : str ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = request.getfixturevalue(_lowerCamelCase )
lowerCamelCase_ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def __a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = request.getfixturevalue(_lowerCamelCase )
lowerCamelCase_ : List[str] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_lowerCamelCase ) , start=1 ):
_test_jsonl(_lowerCamelCase , _lowerCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
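# Not part of the original tests: a self-contained sketch of iter_files() on local
# temporary files, mirroring the last test above without any pytest fixtures.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        for name in ("test.txt", "train.txt"):
            Path(tmp_dir, name).write_text("hello")
        files = sorted(os.path.basename(f) for f in DownloadManager().iter_files([tmp_dir]))
        assert files == ["test.txt", "train.txt"]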
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
snake_case_ : List[Any] = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
snake_case_ : Tuple = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
snake_case_ : Optional[Any] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar `item` is to `main_target` by counting position-wise matches."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the gene pool."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Select a second parent and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until `target` is matched; returns (generation, total_population, best_string)."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f"\nGeneration: {generation}"
f"\nTotal Population:{total_population}"
f"\nBest score: {population_score[0][1]}"
f"\nBest string: {population_score[0][0]}" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
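# Not part of the original script: a tiny deterministic check of the helpers above.
# evaluate() counts position-wise matches, and crossover() only re-splices genes, so the
# multiset of characters across both children always equals that of the parents.
if __name__ == "__main__":
    assert evaluate("abc", "abd") == ("abc", 2.0)
    child_1, child_2 = crossover("aaaa", "bbbb")
    assert sorted(child_1 + child_2) == sorted("aaaabbbb")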
if __name__ == "__main__":
snake_case_ : str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
snake_case_ : Optional[int] = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation, population, target = basic(target_str, genes_list)
print(
f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price of `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
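# Not part of the original script: a quick reload check of the tiny checkpoint saved above
# (`mname_tiny` is the local directory written by save_pretrained).
reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print("reload round-trip ok:", reloaded.num_parameters() == tiny_model.num_parameters())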
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
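# Not part of the original tests: outside the test harness the processor is usually built
# from a published checkpoint (the name below is an assumption):
#
#     processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
#     inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")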
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
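# Not part of the original file: the tokenizer above is greedy longest-match-first, e.g.
#     wp = WordpieceTokenizer(vocab={"un", "happy", "unhappy"}, unk_token="<unk>")
#     wp.tokenize("unhappy")   # -> ["unhappy"]  (the longest vocab entry wins over "un" + "happy")
#     wp.tokenize("unhappyX")  # -> ["unhappy", "<unk>"]  (unmatched characters become unk_token)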
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string with jieba word segmentation followed by WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence-boundary tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the special space/newline entries before writing the file out.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
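# Not part of the original file: typical use of the tokenizer above (requires the `jieba`
# package and network access to the openbmb/cpm-ant-10b vocabulary):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer("今天天气真好!")["input_ids"]   # jieba word cut, then WordPiece
#     text = tokenizer.decode(ids)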
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match")
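# Not part of the original tests: the checkpoint used above also works outside the harness:
#
#     processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#     inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
#     # -> keys: audio_values, audio_mask, pixel_values, pixel_mask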
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
            '''.split()
        self.run_and_check(train_args)

        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args )
        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args )
| 550
| 0
|
'''simple docstring'''
import os
def solution() -> int:
    """Return the total of all name scores in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
    names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 324
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE ( DiffusionPipeline ):
    """Unconditional latent-diffusion pipeline: a VQ-VAE decoder, a UNet and a DDIM scheduler."""

    def __init__( self , vqvae : VQModel , unet : UNet2DModel , scheduler : DDIMScheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 5_0 , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ):
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        latents = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps ):
            latent_model_input = self.scheduler.scale_model_input(latents , t )
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
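# Minimal usage sketch (component values illustrative, not defined in this file):
# any VQModel / UNet2DModel / DDIMScheduler triple can be wired into the pipeline
# above, and a single call returns decoded images.
#
#   pipeline = SCREAMING_SNAKE_CASE(vqvae=vqvae, unet=unet, scheduler=DDIMScheduler())
#   images = pipeline(batch_size=1, num_inference_steps=50).images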
| 324
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )

    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )
@is_pt_tf_cross_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModel.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertModel )
            model = AutoModel.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertModel )

    @slow
    def test_model_for_pretraining_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForPreTraining )

    @slow
    def test_model_for_causal_lm( self ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPT2Config )
            model = TFAutoModelForCausalLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPT2LMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , GPT2LMHeadModel )

    @slow
    def test_lmhead_model_from_pretrained( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )

    @slow
    def test_model_for_masked_lm( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )

    @slow
    def test_model_for_encoder_decoder_lm( self ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , T5Config )
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFT5ForConditionalGeneration )
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , T5ForConditionalGeneration )

    @slow
    def test_sequence_classification_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForSequenceClassification )

    @slow
    def test_question_answering_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForQuestionAnswering )

    def test_from_pretrained_identifier( self ):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )

    def test_from_identifier_from_model_type( self ):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
| 135
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    '''Return a mapping from utf-8 bytes to printable unicode strings for BPE vocabularies.'''
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
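# For example, the printable byte 33 ("!") maps to itself, while the unprintable
# space byte (32) is remapped to a visible stand-in: bytes_to_unicode()[ord(" ")] == "Ġ".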
def get_pairs(word ):
    '''Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
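# e.g. get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}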
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''Apply the ranked byte-pair merges to a single token, caching the result.'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
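    # Example: with hypothetical merges ranked {("l", "o"): 0, ("lo", "w"): 1},
    # bpe("low") rewrites ("l", "o", "w") -> ("lo", "w") -> ("low",) and returns "low".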
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
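    # Example: for token_ids_0 == [5, 6, 7] and no second sequence,
    # get_special_tokens_mask returns [1, 0, 0, 0, 1] (1 marks the <s>/</s> positions).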
    def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
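    # Example: with padding_side == "right" and a length difference of 2, a
    # global_attention_mask of [0, 1, 0] is padded to [0, 1, 0, -1, -1]; the -1
    # entries mark padding rather than local (0) or global (1) attention.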
| 135
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaV2Config(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2Model(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2ForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaV2ForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFDebertaV2ModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaV2Config , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        self.assertIsNotNone(model )
@require_tf
class TFDebertaV2ModelIntegrationTest ( unittest.TestCase ):
    @unittest.skip(reason="Model not available yet" )
    def test_inference_masked_lm( self ):
        pass

    @slow
    def test_inference_no_head( self ):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge" )
        input_ids = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
| 251
|
"""simple docstring"""
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''Convert a length between metric units, e.g. from "kilometer" to "meter".'''
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'from_type' value: {from_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'to_type' value: {to_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
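# Example (function defined above): length_conversion(4, "kilometer", "meter")
# shifts the exponent by 3 - 0 = 3 and returns 4 * 10**3 == 4000.0.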
| 251
| 1
|
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first and last elements
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
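# e.g. heaps([1, 2, 3]) yields all 3! = 6 orderings, starting from (1, 2, 3).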
| 107
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [''' '''] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('''Target return tensor type could not be returned''' )
            encoding = BatchEncoding()
            encoding['''input_ids'''] = input_ids
            encoding['''attention_mask'''] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['''query_pixel_values'''] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )

    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )

    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
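# Usage sketch (checkpoint name illustrative): the processor batches nested text
# queries and images for OWL-ViT style detection models.
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")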
| 467
| 0
|
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')


def get_hash(example):
    """Get hash of the whitespace-stripped content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, '', example['content']).encode('utf-8')).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the content field."""
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the content field."""
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if current hash is still in the set of unique hashes and remove it if true."""
    if example["hash"] in uniques:
        uniques.remove(example['hash'])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration or test file."""
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file has none of the keywords for function, class, for loop or while loop."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol fewer than `minimum` times."""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example['content'], truncation=False)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, 'rb') as f_in:
        with gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
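# e.g. compress_file("data/file-000000000001.json") leaves behind only
# data/file-000000000001.json.gz; the uncompressed source is deleted.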
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 174
|
'''simple docstring'''
import requests
giphy_api_key = 'YOUR API KEY'


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Return the URLs of GIFs matching `query` from the Giphy search API."""
    formatted_query = '+'.join(query.split())
    url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url).json()['data']
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 174
| 1
|
import functools
from typing import Any
def word_break(string: str, words: list) -> bool:
    """Return True if `string` can be segmented into a sequence of words from `words`.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('''the string should be not empty string''')
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words):
        raise ValueError('''the words should be a list of non-empty strings''')
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = '''WORD_KEEPER'''
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string)
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False
    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
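# e.g. word_break("applepenapple", ["apple", "pen"]) -> True, while
# word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) -> False.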
| 542
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase : Optional[int] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 5_12,
"YituTech/conv-bert-medium-small": 5_12,
"YituTech/conv-bert-small": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class __lowercase ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
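    # e.g. build_inputs_with_special_tokens([5, 6], [7]) ->
    # [cls_token_id, 5, 6, sep_token_id, 7, sep_token_id]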
| 542
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowerCAmelCase : str = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""yjernite/retribert-base-uncased""": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"""yjernite/retribert-base-uncased""": {"""do_lower_case""": True},
}
class snake_case ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 717
|
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_23_12
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(10_24 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )


if __name__ == "__main__":
    main()
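# Sketch of a matching sender for the client above (assumed counterpart, not part
# of this file): bind to the same host/port, accept one connection, stream a file.
#
#   server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   server.bind((socket.gethostname(), 12312))
#   server.listen(1)
#   conn, _ = server.accept()
#   with open('file_to_send', 'rb') as in_file:
#       conn.sendall(in_file.read())
#   conn.close()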
| 694
| 0
|
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """Harris corner detector; k is the usual 0.04 or 0.06 sensitivity value."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # use the validated sensitivity instead of a hard-coded 0.04
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
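# e.g. HarrisCorner(0.06, 5).detect("chessboard.png") (path illustrative) returns
# the annotated image plus the list of [x, y, r] corner candidates.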
| 670
|
from manim import *
class lowercase_ ( Scene ):
    def construct( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
| 670
| 1
|
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale version of an RGB image using the ITU-R 601 weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image into a binary mask."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary image with the given structuring element."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
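# Cross-check sketch against SciPy (an assumption: scipy is installed). Away
# from the image borders, scipy.ndimage.binary_dilation with the same
# cross-shaped structuring element should mark the same pixels as dilation().
from scipy.ndimage import binary_dilation

tiny = np.zeros((5, 5), dtype=int)
tiny[2, 2] = 1
cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
print(binary_dilation(tiny, structure=cross).astype(int))  # cross at the centre
print(dilation(tiny, cross))  # should match for this interior-only example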
| 312
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 1
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 113
|
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 113
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn

from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices


if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    validation_split_name: Optional[str] = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    speech_file_column: Optional[str] = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, )
        return batch
class Wav2Vec2PreTrainer(Trainer):
    """Trainer subclass that decays the gumbel-softmax temperature after every update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, )
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, )
    trainer.train()


if __name__ == "__main__":
    main()
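# Launch sketch (illustrative only: the dataset/model names and batch size are
# assumptions; the dataclass flags above plus standard TrainingArguments apply):
# python run_pretrain.py \
#   --model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
#   --dataset_name="librispeech_asr" --dataset_config_name="clean" \
#   --train_split_name="train.100" --output_dir="./wav2vec2-pretrained" \
#   --max_duration_in_seconds=20.0 --per_device_train_batch_size=8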
| 718
|
def solution(n: int = 1000) -> int:
    """
    Return the largest product a * b * c of a Pythagorean triplet
    (a**2 + b**2 == c**2) whose perimeter a + b + c equals n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 26
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
snake_case__ = KandinskyVaaInpaintPipeline
snake_case__ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
snake_case__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
snake_case__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
snake_case__ = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 420
|
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowercase ( a__ : int = 1000000 , a__ : int = 10 ) -> int:
_UpperCamelCase = defaultdict(a__ )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
_UpperCamelCase = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
_UpperCamelCase = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(a__ , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'''{solution() = }''')
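# Brute-force cross-check sketch: enumerate every lamina (outer square of side
# `outer` with a centred same-parity hole of side `hole`) and count the tile
# totals directly; this should agree with solution() for small tile budgets.
def solution_brute(t_limit: int = 1000, n_limit: int = 10) -> int:
    count = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tiles only grow as the hole shrinks
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


assert solution_brute(1000, 10) == solution(1000, 10)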
| 420
| 1
|
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 456
|
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Return the numerator of the reduced proper fraction immediately to the
    left of numerator/denominator among fractions with denominators <= limit.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
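# Cross-check sketch using the standard library: for a modest limit, exhaustive
# Fraction comparison should pick the same left neighbour of 3/7.
from fractions import Fraction


def left_neighbour(limit: int = 8) -> Fraction:
    target = Fraction(3, 7)
    best = Fraction(0, 1)
    for d in range(1, limit + 1):
        for n in range(1, d):
            f = Fraction(n, d)
            if best < f < target:
                best = f
    return best


assert left_neighbour(8).numerator == solution(3, 7, 8)  # both find 2/5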
| 456
| 1
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 493
|
def hamming(n_element: int) -> list:
    """
    Return the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k).
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number")
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
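# Cross-check sketch: a heap-based generator of the same 2^i * 3^j * 5^k
# sequence should agree with hamming() above.
import heapq


def hamming_heap(n_element: int) -> list:
    heap, seen, out = [1], {1}, []
    while len(out) < n_element:
        value = heapq.heappop(heap)
        out.append(value)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return out


assert hamming_heap(20) == hamming(20)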
| 493
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
@property
def a ( self : Optional[Any] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a ( self : str ):
__UpperCAmelCase = ort.SessionOptions()
__UpperCAmelCase = False
return options
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 701
|
"""simple docstring"""
from typing import Any
def lowercase__ ( snake_case_ :list , snake_case_ :list , snake_case_ :dict , snake_case_ :dict , snake_case_ :dict , ):
_validation(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
# Creates data structures and fill initial step
__UpperCAmelCase = {}
__UpperCAmelCase = {}
for state in states_space:
__UpperCAmelCase = observations_space[0]
__UpperCAmelCase = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__UpperCAmelCase = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case_ ) ):
__UpperCAmelCase = observations_space[o]
__UpperCAmelCase = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
# Update probabilities and pointers dicts
__UpperCAmelCase = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__UpperCAmelCase = arg_max
# The final observation
__UpperCAmelCase = observations_space[len(snake_case_ ) - 1]
# argmax for given final observation
__UpperCAmelCase = ''''''
__UpperCAmelCase = -1
for k_state in states_space:
__UpperCAmelCase = probabilities[(k_state, final_observation)]
if probability > max_probability:
__UpperCAmelCase = probability
__UpperCAmelCase = k_state
__UpperCAmelCase = arg_max
# Process pointers backwards
__UpperCAmelCase = last_state
__UpperCAmelCase = []
for o in range(len(snake_case_ ) - 1 , -1 , -1 ):
result.append(snake_case_ )
__UpperCAmelCase = pointers[previous, observations_space[o]]
result.reverse()
return result
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_not_empty(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , )
_validate_lists(snake_case_ , snake_case_ )
_validate_dicts(
snake_case_ , snake_case_ , snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any ):
_validate_list(snake_case_ , '''observations_space''' )
_validate_list(snake_case_ , '''states_space''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
if not isinstance(_object , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list'''
raise ValueError(snake_case_ )
else:
for x in _object:
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case_ )
def lowercase__ ( snake_case_ :Any , snake_case_ :Any , snake_case_ :Any , ):
_validate_dict(snake_case_ , '''initial_probabilities''' , snake_case_ )
_validate_nested_dict(snake_case_ , '''transition_probabilities''' )
_validate_nested_dict(snake_case_ , '''emission_probabilities''' )
def lowercase__ ( snake_case_ :Any , snake_case_ :str ):
_validate_dict(_object , snake_case_ , snake_case_ )
for x in _object.values():
_validate_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def _validate_dict( _object :Any , var_name :str , value_type :type , nested :bool = False ):
    if not isinstance(_object , dict ):
        msg = F'''{var_name} must be a dict'''
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F'''{var_name} all keys must be strings'''
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
        raise ValueError(msg )
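# A minimal usage sketch for the validators above (hypothetical inputs, mirroring
# the classic health/weather HMM example; `_validation` raises on malformed input):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {
#         "Healthy": {"Healthy": 0.7, "Fever": 0.3},
#         "Fever": {"Healthy": 0.4, "Fever": 0.6},
#     }
#     emit_p = {
#         "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#         "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
#     }
#     _validation(observations, states, start_p, trans_p, emit_p)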
if __name__ == "__main__":
from doctest import testmod
testmod()
| 397
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""
    def __init__( self, size : int ):
        self._graph : list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self, vertex : int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )
    @property
    def size( self ):
        return self._size
    def add_edge( self, from_vertex : int, to_vertex : int, weight : int ):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1." )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size)." )
        self._graph[from_vertex].append(Edge(to_vertex, weight ) )
    def get_shortest_path( self, start_vertex : int, finish_vertex : int ) -> int:
        queue = deque([start_vertex] )
        distances : list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex." )
        return distances[finish_vertex]
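# Illustrative 0-1 BFS usage (runs in O(V + E) because the deque keeps vertices
# ordered by distance; the class and method names match the reconstruction above):
#
#     g = AdjacencyList(3)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     assert g.get_shortest_path(0, 2) == 1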
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413
|
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array( array : list[int] , k : int ):
    if len(array ) < k or k < 0:
        raise ValueError("""Invalid Input""" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
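# Sliding-window sketch: the first window sum is computed once; each shift drops
# array[i] and adds array[i + k], so the scan is O(n) instead of O(n * k).
# Example (hand-checked): max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39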
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
    k = randint(0, 1_1_0)
print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 160
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NystromformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NystromformerForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = NystromformerForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class NystromformerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_masked_lm_end_to_end( self ):
        '''simple docstring'''
        sentence = '''the [MASK] of Belgium is Brussels'''
        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
        encoding = tokenizer(sentence , return_tensors='pt' )
        with torch.no_grad():
            token_logits = model(encoding.input_ids ).logits
        prediction = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(prediction ) , 'capital' )
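# Sketch of what the masked-LM check above exercises (illustrative; requires the
# uw-madison/nystromformer-512 checkpoint to be downloadable):
#
#     tokenizer("the [MASK] of Belgium is Brussels") places [MASK] at position 2,
#     so token_logits[:, 2, :].argmax(-1) selects the model's fill-in token,
#     which is expected to decode to "capital".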
| 700
|
'''simple docstring'''
def binomial_coefficient( n : int , r : int ):
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
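# Pascal's-triangle DP: row i is built in place from row i - 1 by sweeping j from
# min(i, r) down to 1, so only O(r) memory is needed. Hand check: C(10, 5) == 252.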
print(binomial_coefficient(n=10, r=5))
| 389
| 0
|
def solution( power : int = 1000 ) -> int:
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
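# Quick sanity check (Project Euler 16 style): 2**15 == 32768, whose digits sum
# to 3 + 2 + 7 + 6 + 8 == 26, so solution(15) == 26.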
if __name__ == "__main__":
    power = int(input("""Enter the power of 2: """).strip())
    print("""2 ^ """, power, """ = """, 2**power)
    result = solution(power)
    print("""Sum of the digits is: """, result)
| 352
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args : Dict , **kwargs : List[Any] ):
            '''simple docstring'''
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline( self , model : List[str] , tokenizer : int , processor : Optional[Any] ):
        '''simple docstring'''
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        examples = [
            {
                'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
                'question': 'How many cats are there?',
            },
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'question': 'How many cats are there?',
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test( self , vqa_pipeline : int , examples : Dict ):
        '''simple docstring'''
        outputs = vqa_pipeline(examples , top_k=1 )
        self.assertEqual(
            outputs , [
                [{'score': ANY(float ), 'answer': ANY(str )}],
                [{'score': ANY(float ), 'answer': ANY(str )}],
            ] , )
@require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        vqa_pipeline = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' )
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'
        outputs = vqa_pipeline(image=image , question='How many cats are there?' , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            outputs , [{'score': ANY(float ), 'answer': ANY(str )}, {'score': ANY(float ), 'answer': ANY(str )}] )
@slow
@require_torch
    def test_large_model_pt( self ):
        '''simple docstring'''
        vqa_pipeline = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' )
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'
        outputs = vqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] )
        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf( self ):
'''simple docstring'''
pass
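# Minimal stand-alone usage of the pipeline under test (illustrative; downloads
# the dandelin/vilt-b32-finetuned-vqa checkpoint on first use):
#
#     from transformers import pipeline
#     vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
#     vqa(image='./tests/fixtures/tests_samples/COCO/000000039769.png',
#         question='How many cats are there?', top_k=1)
#     # -> [{'score': ..., 'answer': '2'}]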
| 352
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    """simple docstring"""
    model_type ='''unispeech'''
    def __init__( self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
    @property
    def inputs_to_logits_ratio( self):
        return functools.reduce(operator.mul , self.conv_stride , 1)
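# Quick sanity check for the property above (assuming the defaults reconstructed
# in __init__): with conv_stride == (5, 2, 2, 2, 2, 2, 2) the ratio is
# 5 * 2**6 == 320, i.e. the feature encoder emits one frame per 320 raw samples.
#
#     config = UniSpeechConfig()
#     assert config.inputs_to_logits_ratio == 320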
| 426
|
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.0_2 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self):
        return ConvNextV2Config(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_backbone( self , config , pixel_values , labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ConvNextV2Model, '''image-classification''': ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self , config_class=ConvNextV2Config , has_text_modality=False , hidden_size=37)
    def test_config( self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds( self):
        pass
    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes( self):
        pass
    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking( self):
        pass
    def test_training( self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing( self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature( self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output( self):
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states) , expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def test_for_image_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4))
| 426
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples ( unittest.TestCase ):
    def analyze_directory( self ,directory : Path ,identifier : Union[str, None] = None ,n_identifier : Union[List[str], None] = None ,ignore_files : Union[str, List[str], None] = None ,only_modules : bool = True ,):
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory ,file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier ,list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("__init__.py" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("Testing" ,file )
            if only_modules:
                module_identifier = file.split("." )[0]
                try:
                    module_identifier = getattr(transformers ,module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) ,0 )
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str(".." / directory / file ) ,optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed ,0 )
    def test_modeling_examples( self ):
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory ,identifier=identifier ,ignore_files=ignore_files )
    def test_tokenization_examples( self ):
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "tokenization"
        self.analyze_directory(directory ,identifier=identifier )
    def test_configuration_examples( self ):
        """simple docstring"""
        directory = Path("src/transformers" )
        identifier = "configuration"
        self.analyze_directory(directory ,identifier=identifier )
    def test_remaining_examples( self ):
        """simple docstring"""
        directory = Path("src/transformers" )
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory ,n_identifier=n_identifiers )
    def test_doc_sources( self ):
        """simple docstring"""
        directory = Path("docs/source" )
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory ,ignore_files=ignore_files ,only_modules=False )
| 524
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_54_57_18_17E-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8  # unit of c : m * s^-1
def casimir_force( force: float , area: float , distance: float ) -> dict[str, float]:
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 524
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707
|
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , requires_safety_checker = True , ) -> Any:
        """simple docstring"""
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def layers( self ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith("""_""" )}
    def enable_attention_slicing( self , slice_size = "auto" ) -> Optional[Any]:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> Tuple:
        """simple docstring"""
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[Any]:
        """simple docstring"""
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> Dict:
        """simple docstring"""
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> str:
        """simple docstring"""
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self , prompt , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ) -> List[str]:
        """simple docstring"""
        device = """cuda""" if torch.cuda.is_available() else """cpu"""
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 35
| 0
|
import math
def jump_search( arr , x ) -> int:
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
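# Jump search needs a sorted array; it probes in blocks of ~sqrt(n) elements and
# then scans linearly inside one block, for O(sqrt(n)) comparisons overall.
# Example: jump_search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 7) == 7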
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
if res == -1:
print("Number not found!")
else:
print(F'''Number {x} is at index {res}''')
| 684
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests( unittest.TestCase ):
    def tearDown( self ) -> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image( self ) -> str:
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    @property
    def dummy_cond_unet_upscale( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=True , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
    @property
    def dummy_vae( self ) -> Optional[Any]:
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
    @property
    def dummy_text_encoder( self ) -> List[str]:
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        return CLIPTextModel(config )
    def test_stable_diffusion_upscale( self ) -> Union[str, Any]:
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch( self ) -> List[str]:
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=350 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        output = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        """Test that stable diffusion upscale works with fp16."""
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
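

# Illustrative end-user sketch (not part of the test suite): the integration tests
# below exercise the released x4 upscaler via `from_pretrained`. A minimal usage
# pattern, assuming a CUDA device, network access, and a PIL image `low_res_img`
# (both the function and argument names are hypothetical), could look like this:
def example_upscale_usage(low_res_img):
    import torch
    from diffusers import StableDiffusionUpscalePipeline

    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe = pipe.to("cuda")
    # the pipeline returns images at 4x the input resolution
    return pipe(prompt="a photo of a cat", image=low_res_img).images[0]
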
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 deviates more from the fp32 reference, hence the looser tolerance
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=image,
            generator=generator,
            num_inference_steps=5,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
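

# Illustrative sketch (not part of the test suite): the memory test above combines
# two diffusers memory optimizations. `enable_attention_slicing(1)` computes
# attention one slice at a time, and `enable_sequential_cpu_offload()` (which
# requires `accelerate`) keeps each sub-model on the CPU until the moment it is
# needed on the GPU. Applied to an already-constructed pipeline (the function and
# argument names here are hypothetical), the pattern looks like this:
def example_low_memory_upscale(pipe, low_res_img):
    pipe.enable_attention_slicing(1)
    pipe.enable_sequential_cpu_offload()
    # trades speed for a much smaller peak VRAM footprint
    return pipe(prompt="a cat sitting on a park bench", image=low_res_img, num_inference_steps=5).images[0]
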
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
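

# Illustrative sketch (not part of the Jukebox module): the `_LazyModule` pattern
# above defers heavy imports until an attribute is first accessed. A minimal
# standalone version of the same idea uses PEP 562's module-level __getattr__;
# the attribute-to-submodule mapping below is hypothetical.
import importlib

_LAZY_ATTRS = {"JukeboxModel": "modeling_jukebox"}  # attribute -> owning submodule


def __getattr__(name):
    # Import the owning submodule only when the attribute is actually requested.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(f".{_LAZY_ATTRS[name]}", __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
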
import json
import os
import unittest
from typing import Tuple

from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer

from ...test_tokenization_common import TokenizerTesterMixin


@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
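
    # The vocab fixture above is deliberately tiny compared to the real checkpoint:
    # each whitespace-separated phoneme becomes one vocabulary entry, and the JSON
    # file written here is what `get_tokenizer` below loads via `from_pretrained`.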
    # overwrite since phonemes require specific creation
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)

        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids

        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)

        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
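
    # Note (assumption for real-model use, not asserted here): the start/end
    # offsets above index CTC steps, one per input id. To convert them to seconds
    # for an actual Wav2Vec2 model, multiply by the model's frame stride
    # (about 20 ms for 16 kHz audio).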
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(output["text"], str)
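

# Illustrative usage sketch (not part of the test suite): encode text to phoneme
# ids and decode them back, mirroring the calls exercised above. Requires the
# `phonemizer` package and network access to download the checkpoint; the
# function name is hypothetical.
def example_phoneme_roundtrip():
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    input_ids = tokenizer("Hello how are you", phonemizer_lang="en-us").input_ids
    # decoding maps ids back to a space-separated phoneme string
    print(tokenizer.decode(input_ids))  # -> "h ə l oʊ h aʊ ɑːɹ j uː"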