| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save the PyTorch model and its configuration
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
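# A minimal invocation sketch for the converter above. The script filename and the
# checkpoint/config paths are hypothetical placeholders, not files shipped with it:
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b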
| 117
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor, one row of tab-separated values per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    (importance scores follow http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If the attention heads were actually pruned, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold,
    as described in http://arxiv.org/abs/1905.10650."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove the weights of) the heads selected by the mask and
    measure the effect on score, parameter count, and speed."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
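# A minimal invocation sketch for the pruning script above (the script filename is a
# hypothetical placeholder; --data_dir must point at a whitespace-separated token-id
# file loadable by np.loadtxt):
#
#   python run_prune_gpt.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./token_ids.txt \
#       --output_dir ./pruned \
#       --try_masking --masking_threshold 0.9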
| 117
| 1
|
"""Image processor class for PoolFormer."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=0.9,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # Scale the resize target up by 1/crop_pct so a later center crop recovers `size`
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
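# A brief usage sketch for the processor above (assumes torch and PIL are installed;
# the blank test image is a hypothetical placeholder):
#
#   import numpy as np
#   from PIL import Image
#   processor = PoolFormerImageProcessor()
#   image = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
#   batch = processor.preprocess(image, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])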
| 338
|
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    Validate a Spanish national ID (DNI): 8 digits plus a check letter.

    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678T")
    False
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338
| 1
|
from ..utils import DummyObject, requires_backends


# Dummy objects that raise a helpful error when the "speech" backend is missing.
# The concrete class names below are illustrative placeholders; the obfuscated
# source does not preserve the originals.
class SpeechFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class SpeechProcessor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 531
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean up as much GPU memory occupied by PyTorch as possible
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
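# For reference, a minimal standalone use of the pipeline exercised above (model name
# taken from the tests; the tiny test model's outputs are not meaningful):
#
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#   print(unmasker("My name is <mask>"))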
| 618
| 0
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    """After each evaluation, also evaluate on the training set under the 'train' metric prefix."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
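# A minimal invocation sketch (the script filename is a hypothetical placeholder;
# note that report_to="wandb" above expects a wandb login unless you override it):
#
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5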
| 618
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-create the backend normalizer if its settings disagree with the requested ones
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always pad to max_length so every candidate in a batch has the same shape
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
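# A brief usage sketch for batch_encode_candidates (checkpoint name taken from the map
# above; the example texts are hypothetical):
#
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   text = [["Hello world!", "Nice to meet you!"]]  # batch_size=1, num_candidates=2
#   batch = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors="pt")
#   # batch["input_ids"] has shape (batch_size, num_candidates, max_length)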
| 618
| 1
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
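# A hypothetical call for illustration (repo and file names are placeholders):
#
#   url = hf_hub_url("user/my-dataset", "data/train.csv", revision="main")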
| 31
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
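# Worked example: the postfix expression "5 6 9 * +" evaluates to 59
# (pop 6 and 9 and push 6*9 = 54, then pop 5 and 54 and push 5+54 = 59).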
| 31
| 1
|
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu ( args ):
    '''simple docstring'''
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            '''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
            '''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples ( file_list ):
    '''simple docstring'''
    num_samples = 0
    for file in file_list:
        filename = file.split('''/''' )[-1]
        sample_count = re.search(r'''-\d+-(\d+)\.tfrecord''' , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
def prepare_dataset ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    '''simple docstring'''
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main ( args ):
    '''simple docstring'''
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
    if not training_records:
        raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
    if not eval_records:
        raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=['''accuracy'''] )
    def decode_fn(example ):
        features = {
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors='''tf''' )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['''attention_mask'''] , tf.bool )
            | (batch['''input_ids'''] == tokenizer.cls_token_id)
            | (batch['''input_ids'''] == tokenizer.sep_token_id)
        )
        batch['''input_ids'''], batch['''labels'''] = data_collator.tf_mask_tokens(
            batch['''input_ids'''] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 300
|
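# A small self-contained sketch of the shard-counting convention used in the
# training script above: shard files are assumed to be named
# "<prefix>-<shard>-<num_samples>.tfrecord", so the sample count can be read
# off the file name without opening the records.
import re

def count_samples_from_names(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        match = re.search(r"-\d+-(\d+)\.tfrecord", filename)
        if match is None:
            raise ValueError(f"Unexpected shard name: {filename}")
        num_samples += int(match.group(1))
    return num_samples

assert count_samples_from_names(["gs://bucket/train-0-1000.tfrecord", "train-1-500.tfrecord"]) == 1500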
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __a( unittest.TestCase ):
"""simple docstring"""
    def test_conversion_roundtrip_and_save ( self ):
        model_id = '''hf-internal-testing/tiny-random-t5'''
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer('''This is me''' ,return_tensors='''pt''' )
        model = model.to_bettertransformer()
        self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output ,output_from_pretrained ) )
    def test_save_requires_reverse ( self ):
        model_id = '''hf-internal-testing/tiny-random-t5'''
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a converted model is expected to raise; reverse first
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
| 300
| 1
|
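# Hedged sketch of the round-trip the tests above exercise: convert to the
# BetterTransformer fast path for inference, then revert before saving, since
# saving a converted model raises. The model id is a tiny test fixture and
# the flow assumes `optimum` is installed.
# from transformers import AutoModelForSeq2SeqLM
# model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()
# ... run generation ...
# model = model.reverse_bettertransformer()
# model.save_pretrained("my-checkpoint")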
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def a__ (repo_id: str , filename: str , revision: Optional[str] = None ) -> str:
    if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename )
    return hfh.hf_hub_url(repo_id , filename , repo_type='''dataset''' , revision=revision )
| 206
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class _snake_case ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        self.jieba = jieba
        self.translator = str.maketrans(" \n" , "\u2582\u2583" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size ( self ) -> int:
        return len(self.sp_model )
    def get_vocab ( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text ( self , inputs: str ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize ( self , text: str ) -> List[str]:
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id ( self , token: str ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token ( self , index: int ) -> str:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string ( self , tokens: List[str] ) -> str:
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def build_inputs_with_special_tokens ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary ( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode ( self , *args , **kwargs ):
        text = super()._decode(*args , **kwargs )
        text = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 382
| 0
|
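# The CPM tokenizer above round-trips whitespace through placeholder glyphs so
# that SentencePiece never sees raw spaces or newlines. A minimal standalone
# sketch of that encode/decode pair (same translation table as the sample):
encode_table = str.maketrans(" \n", "\u2582\u2583")

def encode_ws(text: str) -> str:
    return text.translate(encode_table)

def decode_ws(text: str) -> str:
    return text.replace("\u2582", " ").replace("\u2583", "\n")

assert decode_ws(encode_ws("a b\nc")) == "a b\nc"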
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__lowerCAmelCase : Dict = logging.get_logger(__name__)
logger = __lowerCAmelCase
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class a_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens ( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences ( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary ( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 674
|
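# A standalone sketch of the BERT-style segment-id scheme the tokenizer above
# implements: 0 for "[CLS] A [SEP]", 1 for "B [SEP]" when a pair is given.
def token_type_ids(len_a: int, len_b: int = 0) -> list:
    # +2 accounts for [CLS] and the first [SEP]; +1 for the second [SEP]
    ids = [0] * (len_a + 2)
    if len_b:
        ids += [1] * (len_b + 1)
    return ids

assert token_type_ids(3) == [0, 0, 0, 0, 0]
assert token_type_ids(3, 2) == [0] * 5 + [1] * 3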
"""simple docstring"""
from math import pi, sqrt
def gamma ( num: float ) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma ( ):
    """simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(F"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 674
| 1
|
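# Sanity check of the half-integer recursion above: gamma(n) = (n-1)! for
# positive integers, and gamma(0.5) = sqrt(pi), so gamma(2.5) = 1.5 * 0.5 * sqrt(pi).
from math import isclose, pi, sqrt

def gamma_half(num: float) -> float:
    # same recursion as the sample above, without the input validation
    if num == 0.5:
        return sqrt(pi)
    return 1.0 if num == 1 else (num - 1) * gamma_half(num - 1)

assert isclose(gamma_half(4), 6.0)                     # 3! = 6
assert isclose(gamma_half(2.5), 1.5 * 0.5 * sqrt(pi))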
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_lxmert_fast'] = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lxmert'] = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_lxmert'] = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 556
|
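# A minimal sketch of the lazy-import pattern used above: the module declares
# its public names up front and defers the heavy imports until first attribute
# access. This is a simplified stand-in for transformers' _LazyModule, not the
# real class.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        module = importlib.import_module(module_name)
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value

lazy_math = TinyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
assert lazy_math.sqrt(4) == 2.0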
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mvp'] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 556
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 220
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
class _a ( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer
    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,trim_offsets=True ,**kwargs ,):
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,errors=errors ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets ,**kwargs ,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer ,tokenizer_component ,None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )
            changes_to_apply = False
            if state.get('add_prefix_space' ,add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets' ,trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors ,state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer ,tokenizer_component ,new_value )
    @property
    def mask_token ( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token ( self ,value ):
        value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
        self._mask_token = value
    def _batch_encode_plus ( self ,*args ,**kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _encode_plus ( self ,*args ,**kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args ,**kwargs )
    def save_vocabulary ( self ,save_directory ,filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens ( self ,token_ids_0 ,token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences ( self ,token_ids_0 ,token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 220
| 1
|
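# Hedged sketch of why the tokenizer above guards pretokenized inputs: BPE
# vocabularies like BART's encode "word" and " word" differently, so words
# that arrive pre-split need add_prefix_space=True to get the mid-sentence
# form. The model id is illustrative.
# from transformers import BartTokenizerFast
# tok = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
# enc = tok(["Hello", "world"], is_split_into_words=True)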
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = GPT2Tokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        self.add_bos_token = kwargs.pop('''add_bos_token''' , False )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus ( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus ( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('''is_split_into_words''' , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary ( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids ( self , conversation: "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 76
|
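# Standalone sketch of the left-truncation used by _build_conversation_input_ids
# above: when the accumulated history exceeds the context window, keep only the
# most recent tokens.
def truncate_history(input_ids: list, model_max_length: int) -> list:
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids

assert truncate_history(list(range(10)), 4) == [6, 7, 8, 9]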
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match ( qs , ks ):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    qts = tuple((re.compile(x + '''$''' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules ( rules ):
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules ( ):
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions ( in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
| 100
| 0
|
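# Usage sketch for the partition helpers above (toy shape; the parameter tree
# layout mimics a GPT-2-style flax model and is an assumption, not taken from
# the original sample):
# import numpy as np
# params = {"transformer": {"wte": {"embedding": np.zeros((50257, 768))}}}
# specs = set_partitions(params)
# flatten_dict(specs)[("transformer", "wte", "embedding")]  # -> PartitionSpec("mp", None)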
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 12_8022
FR_CODE = 12_8028
@require_sentencepiece
class snake_case__ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp ( self ):
        super().setUp()
        vocab = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['''spm_file'''] )
        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer ( self , **kwargs ):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts ( self , tokenizer ):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id ( self ):
        token = '''</s>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''</s>''' )
        self.assertEqual(vocab_keys[1] , '''<unk>''' )
        self.assertEqual(vocab_keys[-1] , '''<s>''' )
        self.assertEqual(len(vocab_keys ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
    @unittest.skip('''Skip this test while all models are still to be uploaded.''' )
    def test_pretrained_model_lists ( self ):
        pass
    def test_full_tokenizer ( self ):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2, 3, 4, 5, 6] , )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
        self.assertListEqual(back_tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        text = tokenizer.convert_tokens_to_string(tokens )
        self.assertEqual(text , '''This is a test''' )
    @slow
    def test_tokenizer_integration ( self ):
        # fmt: off
UpperCAmelCase_ = {'''input_ids''': [[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_ , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''facebook/m2m100_418M'''
    src_text = [
        '''In my opinion, there are two levels of response from the French government.''',
        '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
    ]
    tgt_text = [
        '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
        '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
    # fmt: on
    @classmethod
    def setUpClass ( cls ):
        cls.tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' )
        cls.pad_token_id = 1
        return cls
    def test_language_codes ( self ):
        self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_80_06 )
        self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_80_22 )
        self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_80_76 )
        self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_80_63 )
    def test_get_vocab ( self ):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab ) , self.tokenizer.vocab_size )
        self.assertEqual(vocab['''<unk>'''] , 3 )
        self.assertIn(self.tokenizer.get_lang_token('''en''' ) , vocab )
    def test_tokenizer_batch_encode_plus ( self ):
        self.tokenizer.src_lang = '''en'''
        ids = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids )
    def test_tokenizer_decode_ignores_language_codes ( self ):
        self.assertIn(FR_CODE , self.tokenizer.all_special_ids )
        # fmt: off
        generated_ids = [FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_french = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_french )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_save_pretrained ( self ):
        tmpdirname = tempfile.mkdtemp()
        original_lang_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname )
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname )
        self.assertDictEqual(new_tok.lang_token_to_id , original_lang_tokens )
    @require_torch
    def test_batch_fairseq_parity ( self ):
        self.tokenizer.src_lang = '''en'''
        self.tokenizer.tgt_lang = '''fr'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='''pt''' )
        batch['''decoder_input_ids'''] = shift_tokens_right(
            batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter ( self ):
        self.tokenizer.src_lang = '''mr'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer.src_lang = '''zh'''
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
    @require_torch
    def test_tokenizer_target_mode ( self ):
        self.tokenizer.tgt_lang = '''mr'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
        self.tokenizer.tgt_lang = '''zh'''
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] )
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
    @require_torch
    def test_tokenizer_translation ( self ):
        inputs = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # en_XX, A, test, EOS
                '''input_ids''': [[12_80_22, 58, 41_83, 2]],
                '''attention_mask''': [[1, 1, 1, 1]],
                # ar_AR
                '''forced_bos_token_id''': 12_80_06,
            } , )
| 407
|
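# Standalone sketch of the sequence layout the M2M100 tests above assert:
# source ids are "<lang_code> ... </s>" — a language-token prefix plus an EOS
# suffix. EN_CODE matches the constant used in the tests.
def wrap_with_lang(token_ids: list, lang_code_id: int, eos_id: int = 2) -> list:
    return [lang_code_id] + token_ids + [eos_id]

EN_CODE = 128022
assert wrap_with_lang([593, 1949], EN_CODE) == [128022, 593, 1949, 2]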
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'edbeeching/decision-transformer-gym-hopper-medium': (
        'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = '''decision_transformer'''
__A = ['''past_key_values''']
__A = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str]=17 , lowerCAmelCase_ : List[str]=4 , lowerCAmelCase_ : List[str]=1_28 , lowerCAmelCase_ : str=40_96 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : Optional[Any]=10_24 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Any=1 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Union[str, Any]="relu" , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : Optional[int]=1e-5 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[Any]=5_02_56 , lowerCAmelCase_ : Any=5_02_56 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : List[str]=False , **lowerCAmelCase_ : List[Any] , ) -> Any:
UpperCAmelCase_ = state_dim
UpperCAmelCase_ = act_dim
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = max_ep_len
UpperCAmelCase_ = action_tanh
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = n_positions
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = n_inner
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = resid_pdrop
UpperCAmelCase_ = embd_pdrop
UpperCAmelCase_ = attn_pdrop
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scale_attn_weights
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ = reorder_and_upcast_attn
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
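# A minimal instantiation sketch (hypothetical class name, assuming this config
# is exported as DecisionTransformerConfig to match the checkpoint URL above;
# the defaults already correspond to the gym-hopper checkpoints):
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4)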
| 407
| 1
|
import numpy as np
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case = 1E-12 , __snake_case = 100 , ):
assert np.shape(__snake_case )[0] == np.shape(__snake_case )[1]
# Ensure proper dimensionality.
assert np.shape(__snake_case )[0] == np.shape(__snake_case )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__snake_case ) == np.iscomplexobj(__snake_case )
__lowerCAmelCase = np.iscomplexobj(__snake_case )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__snake_case , input_matrix.conj().T )
    # Set convergence to False. We stop when we exceed max_iterations or when
    # changes from one iteration to the next become small.
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 1E12
while not convergence:
        # Multiply the matrix by the vector.
__lowerCAmelCase = np.dot(__snake_case , __snake_case )
# Normalize the resulting output vector.
__lowerCAmelCase = w / np.linalg.norm(__snake_case )
        # Find the Rayleigh quotient
        # (faster than usual because the vector is already normalized).
__lowerCAmelCase = vector.conj().T if is_complex else vector.T
__lowerCAmelCase = np.dot(__snake_case , np.dot(__snake_case , __snake_case ) )
# Check convergence.
__lowerCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowerCAmelCase = True
__lowerCAmelCase = lambda_
if is_complex:
__lowerCAmelCase = np.real(lambda_ )
return lambda_, vector
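# A minimal usage sketch (hedged: `power_iteration` is the name the test below
# uses to call the function above). For the symmetric matrix [[2, 1], [1, 2]]
# the dominant eigenvalue is 3, with eigenvector proportional to [1, 1]:
#     eigen_value, eigen_vector = power_iteration(
#         np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0])
#     )
#     assert abs(eigen_value - 3.0) < 1e-6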
def __lowerCAmelCase ( ):
__lowerCAmelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__lowerCAmelCase = np.array([41, 4, 20] )
__lowerCAmelCase = real_input_matrix.astype(np.complexaaa )
__lowerCAmelCase = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__lowerCAmelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__lowerCAmelCase = real_input_matrix
__lowerCAmelCase = real_vector
elif problem_type == "complex":
__lowerCAmelCase = complex_input_matrix
__lowerCAmelCase = complex_vector
# Our implementation.
__lowerCAmelCase , __lowerCAmelCase = power_iteration(__snake_case , __snake_case )
# Numpy implementation.
        # Get eigenvalues and eigenvectors using numpy's built-in
        # eigh (used for symmetric or Hermitian matrices).
__lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(__snake_case )
        # The last eigenvalue is the largest one.
        __lowerCAmelCase = eigen_values[-1]
        # The last column of this matrix is the eigenvector corresponding to
        # the largest eigenvalue.
        __lowerCAmelCase = eigen_vectors[:, -1]
        # Check that our implementation and numpy give close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take element-wise absolute values of each eigenvector,
        # as eigenvectors are only unique up to a sign.
assert np.linalg.norm(np.abs(__snake_case ) - np.abs(__snake_case ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 367
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase (a_ ):
snake_case_ = (PNDMScheduler,)
snake_case_ = (("""num_inference_steps""", 50),)
def __UpperCAmelCase ( self , **__UpperCamelCase )-> int:
__lowerCAmelCase = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**__UpperCamelCase )
return config
def __UpperCAmelCase ( self , __UpperCamelCase=0 , **__UpperCamelCase )-> Optional[int]:
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
__lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
__lowerCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
new_scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals
__lowerCAmelCase = dummy_past_residuals[:]
__lowerCAmelCase = scheduler.step_prk(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
__lowerCAmelCase = new_scheduler.step_prk(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowerCAmelCase = scheduler.step_plms(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
__lowerCAmelCase = new_scheduler.step_plms(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self )-> Dict:
pass
def __UpperCAmelCase ( self , __UpperCamelCase=0 , **__UpperCamelCase )-> Dict:
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__UpperCamelCase )
__lowerCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
                new_scheduler.set_timesteps(__UpperCamelCase )
                # copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase = dummy_past_residuals[:]
__lowerCAmelCase = scheduler.step_prk(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
__lowerCAmelCase = new_scheduler.step_prk(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowerCAmelCase = scheduler.step_plms(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
__lowerCAmelCase = new_scheduler.step_plms(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self , **__UpperCamelCase )-> Tuple:
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
__lowerCAmelCase = 1_0
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(__UpperCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
__lowerCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = scheduler.step_prk(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__lowerCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = scheduler.step_plms(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
return sample
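    # Note on the helper above (accurate to PNDM sampling): it first runs the
    # Runge-Kutta warm-up phase over scheduler.prk_timesteps, then switches to
    # linear multistep updates over scheduler.plms_timesteps, returning the
    # final denoised sample.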
def __UpperCAmelCase ( self )-> int:
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase )
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(__UpperCamelCase )
elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ):
__lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__lowerCAmelCase = dummy_past_residuals[:]
__lowerCAmelCase = scheduler.step_prk(__UpperCamelCase , 0 , __UpperCamelCase , **__UpperCamelCase ).prev_sample
__lowerCAmelCase = scheduler.step_prk(__UpperCamelCase , 1 , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__lowerCAmelCase = scheduler.step_plms(__UpperCamelCase , 0 , __UpperCamelCase , **__UpperCamelCase ).prev_sample
__lowerCAmelCase = scheduler.step_plms(__UpperCamelCase , 1 , __UpperCamelCase , **__UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self )-> List[str]:
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def __UpperCAmelCase ( self )-> Any:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__UpperCamelCase )
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def __UpperCAmelCase ( self )-> int:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def __UpperCAmelCase ( self )-> Optional[int]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def __UpperCAmelCase ( self )-> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def __UpperCAmelCase ( self )-> List[Any]:
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=__UpperCamelCase )
def __UpperCAmelCase ( self )-> Optional[Any]:
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=__UpperCamelCase )
def __UpperCAmelCase ( self )-> int:
        # an earlier version of set_timesteps() raised an indexing error on
        # alphas when num_inference_steps was a power of 3
__lowerCAmelCase = 2_7
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(__UpperCamelCase )
            # before the power-of-3 fix this would error on the first step, so
            # we only need to run two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__lowerCAmelCase = scheduler.step_prk(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
def __UpperCAmelCase ( self )-> Dict:
with self.assertRaises(__UpperCamelCase ):
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __UpperCAmelCase ( self )-> Any:
__lowerCAmelCase = self.full_loop()
__lowerCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __UpperCAmelCase ( self )-> str:
__lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
__lowerCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __UpperCAmelCase ( self )-> Any:
        # We specify a different beta_start so that the first alpha is 0.99
__lowerCAmelCase = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.0_1 )
__lowerCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __UpperCAmelCase ( self )-> Optional[Any]:
        # We specify a different beta_start so that the first alpha is 0.99
__lowerCAmelCase = self.full_loop(set_alpha_to_one=__UpperCamelCase , beta_start=0.0_1 )
__lowerCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
__lowerCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 367
| 1
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
UpperCamelCase__ = ConsistencyModelPipeline
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCamelCase__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
UpperCamelCase__ = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def snake_case_ ( self):
A__ = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet''' , )
return unet
@property
def snake_case_ ( self):
A__ = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , )
return unet
def snake_case_ ( self , a__=False):
if class_cond:
A__ = self.dummy_cond_unet
else:
A__ = self.dummy_uncond_unet
# Default to CM multistep sampler
A__ = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
A__ = {
"unet": unet,
"scheduler": scheduler,
}
return components
def snake_case_ ( self , a__ , a__=0):
if str(__a).startswith('''mps'''):
A__ = torch.manual_seed(__a)
else:
A__ = torch.Generator(device=__a).manual_seed(__a)
A__ = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [2_2, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def snake_case_ ( self):
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = ConsistencyModelPipeline(**__a)
A__ = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_dummy_inputs(__a)
A__ = pipe(**__a).images
assert image.shape == (1, 3_2, 3_2, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def snake_case_ ( self):
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components(class_cond=__a)
A__ = ConsistencyModelPipeline(**__a)
A__ = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_dummy_inputs(__a)
A__ = 0
A__ = pipe(**__a).images
assert image.shape == (1, 3_2, 3_2, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def snake_case_ ( self):
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = ConsistencyModelPipeline(**__a)
A__ = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_dummy_inputs(__a)
A__ = 1
A__ = None
A__ = pipe(**__a).images
assert image.shape == (1, 3_2, 3_2, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def snake_case_ ( self):
A__ = "cpu" # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components(class_cond=__a)
A__ = ConsistencyModelPipeline(**__a)
A__ = pipe.to(__a)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_dummy_inputs(__a)
A__ = 1
A__ = None
A__ = 0
A__ = pipe(**__a).images
assert image.shape == (1, 3_2, 3_2, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def snake_case_ ( self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self , a__=0 , a__=False , a__="cpu" , a__=torch.floataa , a__=(1, 3, 6_4, 6_4)):
A__ = torch.manual_seed(__a)
A__ = {
"num_inference_steps": None,
"timesteps": [2_2, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
A__ = self.get_fixed_latents(seed=__a , device=__a , dtype=__a , shape=__a)
A__ = latents
return inputs
def snake_case_ ( self , a__=0 , a__="cpu" , a__=torch.floataa , a__=(1, 3, 6_4, 6_4)):
if type(__a) == str:
A__ = torch.device(__a)
A__ = torch.Generator(device=__a).manual_seed(__a)
A__ = randn_tensor(__a , generator=__a , device=__a , dtype=__a)
return latents
def snake_case_ ( self):
A__ = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''')
A__ = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
A__ = ConsistencyModelPipeline(unet=__a , scheduler=__a)
pipe.to(torch_device=__a)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_inputs()
A__ = pipe(**__a).images
assert image.shape == (1, 6_4, 6_4, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
def snake_case_ ( self):
A__ = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''')
A__ = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
A__ = ConsistencyModelPipeline(unet=__a , scheduler=__a)
pipe.to(torch_device=__a)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_inputs()
A__ = 1
A__ = None
A__ = pipe(**__a).images
assert image.shape == (1, 6_4, 6_4, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7])
assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
@require_torch_a
def snake_case_ ( self):
A__ = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''')
A__ = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
A__ = ConsistencyModelPipeline(unet=__a , scheduler=__a)
pipe.to(torch_device=__a , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_inputs(get_fixed_latents=__a , device=__a)
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a):
A__ = pipe(**__a).images
assert image.shape == (1, 6_4, 6_4, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@require_torch_a
def snake_case_ ( self):
A__ = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''')
A__ = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
A__ = ConsistencyModelPipeline(unet=__a , scheduler=__a)
pipe.to(torch_device=__a , torch_dtype=torch.floataa)
pipe.set_progress_bar_config(disable=__a)
A__ = self.get_inputs(get_fixed_latents=__a , device=__a)
A__ = 1
A__ = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a):
A__ = pipe(**__a).images
assert image.shape == (1, 6_4, 6_4, 3)
A__ = image[0, -3:, -3:, -1]
A__ = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 707
|
import numpy as np
def lowerCAmelCase__ ( UpperCamelCase_ : np.array )-> np.array:
return 1 / (1 + np.exp(-vector ))
def lowerCAmelCase__ ( UpperCamelCase_ : np.array )-> np.array:
return vector * sigmoid(1.702 * vector )
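# A quick sanity-check sketch (the first function is referenced as `sigmoid`
# by the second; `sigmoid_linear_unit` is a hypothetical name for the second,
# which computes the sigmoid-based GELU approximation x * sigmoid(1.702 * x)):
#     sigmoid(np.array([0.0]))              # -> array([0.5])
#     sigmoid_linear_unit(np.array([0.0]))  # -> array([0.])
# For large positive x the approximation approaches the identity, since
# sigmoid(1.702 * x) -> 1.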
if __name__ == "__main__":
import doctest
doctest.testmod()
| 526
| 0
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __SCREAMING_SNAKE_CASE ( A_ ):
    lowerCAmelCase__ : List[str] = filter(lambda p : p.requires_grad , model.parameters() )
lowerCAmelCase__ : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCamelCase : int = logging.getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
if metric == "rouge2":
lowerCAmelCase__ : Optional[int] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
lowerCAmelCase__ : Union[str, Any] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
lowerCAmelCase__ : Dict = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
lowerCAmelCase__ : Dict = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
            f'seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this'
            ''' function.''' )
lowerCAmelCase__ : List[str] = ModelCheckpoint(
dirpath=__lowerCamelCase , filename=__lowerCamelCase , monitor=f'val_{metric}' , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
return EarlyStopping(
monitor=f'val_{metric}' , mode='''min''' if '''loss''' in metric else '''max''' , patience=__lowerCamelCase , verbose=__lowerCamelCase , )
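# A minimal wiring sketch (hypothetical names for the two factories above,
# which return a ModelCheckpoint keyed on val_<metric> and an EarlyStopping
# callback whose mode flips to "min" when the metric is a loss):
#     callbacks = [
#         get_checkpoint_callback(output_dir, "rouge2"),
#         get_early_stopping_callback("rouge2", patience=3),
#     ]
#     trainer = pl.Trainer(callbacks=callbacks)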
class SCREAMING_SNAKE_CASE ( pl.Callback ):
"""simple docstring"""
def __lowerCAmelCase ( self : int ,lowercase_ : Optional[Any] ,lowercase_ : List[str] ):
lowerCAmelCase__ : List[Any] = {F'lr_group_{i}': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowercase_ )
@rank_zero_only
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : pl.Trainer ,lowercase_ : pl.LightningModule ,lowercase_ : str ,lowercase_ : Tuple=True ):
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
lowerCAmelCase__ : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
lowerCAmelCase__ : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCAmelCase__ : Optional[Any] = od / '''test_results.txt'''
lowerCAmelCase__ : List[str] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCAmelCase__ : int = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
lowerCAmelCase__ : List[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=lowercase_ )
generations_file.parent.mkdir(exist_ok=lowercase_ )
with open(lowercase_ ,'''a+''' ) as writer:
for key in sorted(lowercase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCAmelCase__ : Any = metrics[key]
if isinstance(lowercase_ ,torch.Tensor ):
lowerCAmelCase__ : Optional[int] = val.item()
lowerCAmelCase__ : Union[str, Any] = F'{key}: {val:.6f}\n'
writer.write(lowercase_ )
if not save_generations:
return
if "preds" in metrics:
lowerCAmelCase__ : List[str] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(lowercase_ )
@rank_zero_only
def __lowerCAmelCase ( self : int ,lowercase_ : int ,lowercase_ : Union[str, Any] ):
try:
lowerCAmelCase__ : str = pl_module.model.model.num_parameters()
except AttributeError:
lowerCAmelCase__ : Any = pl_module.model.num_parameters()
lowerCAmelCase__ : List[str] = count_trainable_parameters(lowercase_ )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self : Dict ,lowercase_ : pl.Trainer ,lowercase_ : pl.LightningModule ):
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(lowercase_ ,lowercase_ ,'''test''' )
@rank_zero_only
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : pl.Trainer ,lowercase_ : Dict ):
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 450
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCAmelCase_ : Optional[int] = TypeVar("_T")
class UpperCamelCase__ ( Generic[_T] ):
def __init__( self : Dict , lowerCamelCase : Iterable[_T] | None = None ):
'''simple docstring'''
a__ = list(iterable or [] )
a__ = []
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[Any] ):
'''simple docstring'''
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def __a ( self : Union[str, Any] , lowerCamelCase : _T ):
'''simple docstring'''
self._stacka.append(lowerCamelCase )
def __a ( self : Dict ):
'''simple docstring'''
a__ = self._stacka.pop
a__ = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
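# A minimal usage sketch (hypothetical method names; the class above is a FIFO
# queue built on two stacks, moving elements from the inbox stack to the outbox
# stack only when the outbox is empty, so each element is moved at most once
# and dequeue is amortized O(1)):
#     q = Queue([1, 2])
#     q.put(3)   # first method: push onto the inbox stack
#     q.get()    # second method: pop from the outbox stack -> returns 1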
if __name__ == "__main__":
from doctest import testmod
testmod()
| 489
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
_snake_case = '''roberta'''
def __init__( self , snake_case_=50_265 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3_072 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=2 , snake_case_=0.02 , snake_case_=1e-1_2 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=True , snake_case_=None , **snake_case_ , ) -> str:
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = hidden_act
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = position_embedding_type
__lowerCAmelCase = use_cache
__lowerCAmelCase = classifier_dropout
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 573
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = SwinConfig()
__lowerCAmelCase = swin_name.split("""_""" )
__lowerCAmelCase = name_split[1]
__lowerCAmelCase = int(name_split[4] )
__lowerCAmelCase = int(name_split[3][-1] )
if model_size == "tiny":
__lowerCAmelCase = 96
__lowerCAmelCase = (2, 2, 6, 2)
__lowerCAmelCase = (3, 6, 12, 24)
elif model_size == "small":
__lowerCAmelCase = 96
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (3, 6, 12, 24)
elif model_size == "base":
__lowerCAmelCase = 128
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (4, 8, 16, 32)
else:
__lowerCAmelCase = 192
__lowerCAmelCase = (2, 2, 18, 2)
__lowerCAmelCase = (6, 12, 24, 48)
if "in22k" in swin_name:
__lowerCAmelCase = 2_1841
else:
__lowerCAmelCase = 1000
__lowerCAmelCase = """huggingface/label-files"""
__lowerCAmelCase = """imagenet-1k-id2label.json"""
__lowerCAmelCase = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__lowerCAmelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
__lowerCAmelCase = img_size
__lowerCAmelCase = num_classes
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
return config
def lowercase (_lowerCAmelCase ):
if "patch_embed.proj" in name:
__lowerCAmelCase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__lowerCAmelCase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
__lowerCAmelCase = """encoder.""" + name
if "attn.proj" in name:
__lowerCAmelCase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowerCAmelCase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowerCAmelCase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowerCAmelCase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowerCAmelCase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowerCAmelCase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
__lowerCAmelCase = """layernorm.weight"""
if name == "norm.bias":
__lowerCAmelCase = """layernorm.bias"""
if "head" in name:
__lowerCAmelCase = name.replace("""head""" , """classifier""" )
else:
__lowerCAmelCase = """swin.""" + name
return name
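# An example of the renaming above, applied rule by rule:
#     "layers.0.blocks.0.attn.proj.weight"
#     -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"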
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
for key in orig_state_dict.copy().keys():
__lowerCAmelCase = orig_state_dict.pop(_lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = int(key_split[1] )
__lowerCAmelCase = int(key_split[3] )
__lowerCAmelCase = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
__lowerCAmelCase = val[:dim, :]
__lowerCAmelCase = val[
dim : dim * 2, :
]
__lowerCAmelCase = val[-dim:, :]
else:
__lowerCAmelCase = val[
:dim
]
__lowerCAmelCase = val[
dim : dim * 2
]
__lowerCAmelCase = val[
-dim:
]
else:
__lowerCAmelCase = val
return orig_state_dict
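# The qkv branch above splits timm's fused attention projection: a fused
# weight of shape (3 * dim, dim) is sliced row-wise into query (val[:dim]),
# key (val[dim : dim * 2]) and value (val[-dim:]) tensors, and the fused bias
# of shape (3 * dim,) is split the same way.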
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
__lowerCAmelCase = get_swin_config(_lowerCAmelCase )
__lowerCAmelCase = SwinForImageClassification(_lowerCAmelCase )
model.eval()
__lowerCAmelCase = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
__lowerCAmelCase = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
__lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
__lowerCAmelCase = timm_model(inputs["""pixel_values"""] )
__lowerCAmelCase = model(**_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print(f"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 573
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Tuple , *a__ : Dict , **a__ : Optional[Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : List[str] , *a__ : Union[str, Any] , **a__ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[Any] , *a__ : List[Any] , **a__ : Union[str, Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Union[str, Any] , *a__ : int , **a__ : Optional[int] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Tuple , *a__ : Union[str, Any] , **a__ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : List[Any] , *a__ : List[str] , **a__ : List[Any] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Dict , *a__ : int , **a__ : str ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Tuple , *a__ : str , **a__ : Optional[int] ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[Any] , *a__ : int , **a__ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Optional[Any] , *a__ : Union[str, Any] , **a__ : List[str] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[int] , *a__ : Union[str, Any] , **a__ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Optional[int] , *a__ : str , **a__ : int ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Optional[int] , *a__ : Dict , **a__ : List[Any] ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : int , *a__ : List[str] , **a__ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : int , *a__ : int , **a__ : Any ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCAmelCase__ ( metaclass=UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["torch", "transformers", "onnx"]
def __init__( self : Any , *a__ : Tuple , **a__ : int ):
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : int , *a__ : List[Any] , **a__ : str ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def __snake_case ( cls : Dict , *a__ : Tuple , **a__ : Tuple ):
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 51
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __UpperCAmelCase (__A ):
'''simple docstring'''
_UpperCamelCase : Tuple = 'gpt_neo'
_UpperCamelCase : Optional[Any] = ['past_key_values']
_UpperCamelCase : int = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , snake_case_=50_257 , snake_case_=2_048 , snake_case_=2_048 , snake_case_=24 , snake_case_=[[["global", "local"], 12]] , snake_case_=16 , snake_case_=None , snake_case_=256 , snake_case_="gelu_new" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=1E-5 , snake_case_=0.02 , snake_case_=True , snake_case_=50_256 , snake_case_=50_256 , **snake_case_ , ):
'''simple docstring'''
A__ : int = vocab_size
A__ : Optional[Any] = max_position_embeddings
A__ : int = hidden_size
A__ : List[Any] = num_layers
A__ : Any = num_heads
A__ : List[str] = intermediate_size
A__ : Dict = window_size
A__ : Optional[int] = activation_function
A__ : Optional[int] = resid_dropout
A__ : List[str] = embed_dropout
A__ : str = attention_dropout
A__ : List[str] = classifier_dropout
A__ : str = layer_norm_epsilon
A__ : str = initializer_range
A__ : Any = use_cache
A__ : List[str] = bos_token_id
A__ : Any = eos_token_id
A__ : Optional[Any] = attention_types
A__ : List[Any] = self.expand_attention_types_params(snake_case_ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
F'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
@staticmethod
def lowerCamelCase ( snake_case_ ):
'''simple docstring'''
A__ : Optional[Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
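    # For example, attention_types=[[["global", "local"], 12]] expands to the
    # alternating per-layer pattern ["global", "local", "global", "local", ...]
    # with 24 entries, matching the default num_layers=24 above.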
def _A( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
import torch
A__ : Union[str, Any] = input.size()
A__ : str = len(lowerCAmelCase )
A__ : Union[str, Any] = shape[dimension]
A__ : Dict = torch.arange(0 , lowerCAmelCase , lowerCAmelCase )
A__ : Dict = torch.div(sizedim - size , lowerCAmelCase , rounding_mode="""floor""" ) + 1
A__ : str = torch.arange(lowerCAmelCase ) + low_indices[:min_length][:, None]
A__ : str = [slice(lowerCAmelCase )] * rank
A__ : Optional[Any] = indices
A__ : Tuple = input[s]
A__ : List[str] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(lowerCAmelCase )
def _A( lowerCAmelCase , lowerCAmelCase ):
import torch
A__ : Optional[Any] = torch.arange(1 , lowerCAmelCase )
A__ : int = torch.remainder(lowerCAmelCase , lowerCAmelCase )
A__ : str = remainders == 0
A__ : Tuple = candidates[divisor_indices]
A__ : Any = torch.max(lowerCAmelCase )
return largest_divisor, torch.div(lowerCAmelCase , lowerCAmelCase , rounding_mode="""floor""" )
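# A worked example for the helper above (hedged: assuming its two obfuscated
# arguments are (seq_length, window_size), which matches the local-attention
# context): it returns the largest divisor of seq_length that is smaller than
# window_size plus the resulting number of blocks, e.g.
# seq_length=12, window_size=8 -> (6, 2).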
class __UpperCAmelCase (__A ):
'''simple docstring'''
@property
def lowerCamelCase ( self ):
'''simple docstring'''
A__ : Any = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction="""inputs""" )
A__ : str = {0: """batch""", 1: """past_sequence + sequence"""}
else:
A__ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self._config.num_heads
def lowerCamelCase ( self , snake_case_ , snake_case_ = -1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , ):
'''simple docstring'''
A__ : Optional[int] = super(snake_case_ , self ).generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
        # We need to order the inputs in the way they appear in the forward()
A__ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_key_values
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
A__ , A__ : Dict = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
A__ : Any = seqlen + 2
A__ : Any = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A__ : List[Any] = [
(torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(self.num_layers )
]
A__ : Tuple = common_inputs["""attention_mask"""]
if self.use_past:
A__ : List[Any] = ordered_inputs["""attention_mask"""].dtype
A__ : Optional[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return 13
| 363
| 0
|
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class lowercase__ ( ctypes.Structure ):
A__ : Optional[Any] =[("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def _lowercase ( ) -> str:
'''simple docstring'''
if os.name == "nt":
SCREAMING_SNAKE_CASE__ = CursorInfo()
SCREAMING_SNAKE_CASE__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE__ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def _lowercase ( ) -> List[Any]:
'''simple docstring'''
if os.name == "nt":
SCREAMING_SNAKE_CASE__ = CursorInfo()
SCREAMING_SNAKE_CASE__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE__ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_SCREAMING_SNAKE_CASE , ctypes.byref(_SCREAMING_SNAKE_CASE ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def _lowercase ( ) -> List[str]:
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
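# A minimal usage sketch (the context manager above has an obfuscated name; it
# wraps the hide_cursor()/show_cursor() helpers its try/finally calls, so the
# terminal cursor is always restored on exit, even if the wrapped block raises):
#     with hidden_cursor():   # hypothetical name
#         run_long_task()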
| 702
|
from collections import defaultdict
class lowercase__ :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
SCREAMING_SNAKE_CASE__ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(UpperCAmelCase_ ) )
]
SCREAMING_SNAKE_CASE__ = defaultdict(UpperCAmelCase_ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
SCREAMING_SNAKE_CASE__ = (1 << len(UpperCAmelCase_ )) - 1
def A_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str ):
        # if mask == self.final_mask, every person has been assigned a task; return 1
if mask == self.final_mask:
return 1
        # if not everyone has been assigned a task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
        # if this case has already been computed, reuse the memoized value
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
SCREAMING_SNAKE_CASE__ = self.count_ways_until(UpperCAmelCase_ , task_no + 1 )
        # now try assigning this task to every eligible person and recursively
        # assign the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
            # if person p has already been assigned a task
if mask & (1 << p):
continue
            # assign this task to p, update the mask, and recursively assign
            # the remaining tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
SCREAMING_SNAKE_CASE__ = total_ways_util
return self.dp[mask][task_no]
def A_ ( self : Optional[Any] , UpperCAmelCase_ : List[str] ):
# Store the list of persons for each task
for i in range(len(UpperCAmelCase_ ) ):
for j in task_performed[i]:
self.task[j].append(UpperCAmelCase_ )
        # call the function to fill the DP table; the final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
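    # Complexity note for the memoized recursion above: the state is
    # (mask of assigned persons, current task), giving 2^M * (N + 1) states;
    # each state tries at most M persons, so the total work is O(2^M * N * M)
    # with O(2^M * N) space for M persons and N tasks.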
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 400
| 0
|
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
A__ : Any = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
A__ : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
A__ : Tuple = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
def A_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def A_ ( self : List[Any] , __a : int , __a : Any , __a : Tuple=None , __a : Dict=1 , __a : Any="binary" , __a : Tuple=None ) -> List[Any]:
'''simple docstring'''
__snake_case : str = fa_score(
__a , __a , labels=__a , pos_label=__a , average=__a , sample_weight=__a )
return {"f1": float(__a ) if score.size == 1 else score}
| 286
|
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
@staticmethod
@abstractmethod
def A_ ( __a : ArgumentParser ) -> List[str]:
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def A_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
| 286
| 1
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase_ = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
lowerCAmelCase_ = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase_ = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase_ = sorted(arg_to_scheduler.keys())
lowerCAmelCase_ = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class _A ( pl.LightningModule ):
def __init__( self : Tuple , _A : argparse.Namespace , _A : int=None , _A : Any="base" , _A : Optional[Any]=None , _A : int=None , _A : List[str]=None , **_A : Tuple , ) -> Dict:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
lowercase : Union[str, Any] = 0
lowercase : Any = Path(self.hparams.output_dir )
lowercase : Optional[Any] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
lowercase : Tuple = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
lowercase : PretrainedConfig = config
lowercase : List[str] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), f"""model config doesn't have a `{p}` attribute"""
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
lowercase : int = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
lowercase : PreTrainedTokenizer = tokenizer
lowercase : int = MODEL_MODES[mode]
if model is None:
lowercase : Optional[int] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
lowercase : Union[str, Any] = model
def __a ( self : Any , *_A : Tuple , **_A : Tuple ) -> Any:
"""simple docstring"""
lowercase : Dict = self.model_type.from_pretrained(*_A , **_A )
def __a ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase : str = arg_to_scheduler[self.hparams.lr_scheduler]
lowercase : Dict = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
lowercase : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __a ( self : int ) -> Tuple:
"""simple docstring"""
lowercase : List[Any] = self.model
lowercase : Any = ['''bias''', '''LayerNorm.weight''']
lowercase : Optional[int] = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # double-check this handling of named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
lowercase : Any = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
lowercase : Optional[Any] = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
lowercase : Optional[int] = optimizer
lowercase : Tuple = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __a ( self : Dict , _A : Union[str, Any] , _A : Tuple ) -> str:
"""simple docstring"""
return self.validation_step(_A , _A )
def __a ( self : int , _A : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.validation_end(_A )
def __a ( self : Tuple ) -> int:
"""simple docstring"""
lowercase : str = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
lowercase : Tuple = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __a ( self : Dict , _A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if stage == "test":
lowercase : Any = len(self.test_dataloader().dataset )
else:
lowercase : List[str] = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=_A )
lowercase : List[Any] = len(self.train_dataloader().dataset )
def __a ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> List[str]:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def __a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return self.train_loader
def __a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=_A )
def __a ( self : int , _A : str ) -> List[str]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __a ( self : List[Any] , _A : Dict[str, Any] ) -> None:
"""simple docstring"""
lowercase : Optional[int] = self.output_dir.joinpath('''best_tfmr''' )
lowercase : Union[str, Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __a ( _A : int , _A : List[str] ) -> List[Any]:
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=_A , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=_A , type=_A , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(_A ).parent / '''test_run''' / '''cache''' ) , type=_A , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=_A , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=_A , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=_A , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=_A , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=_A , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=_A , metavar=_A , type=_A , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=_A , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=_A , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=_A )
parser.add_argument('''--train_batch_size''' , default=32 , type=_A )
parser.add_argument('''--eval_batch_size''' , default=32 , type=_A )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class _A ( pl.Callback ):
def __a ( self : Tuple , _A : str , _A : List[Any] ) -> Optional[int]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators were removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _A ( pl.Callback ):
def __a ( self : Optional[int] , _A : Union[str, Any] , _A : Optional[int] ) -> Dict:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _A ( pl.Callback ):
def __a ( self : Tuple , _A : List[str] , _A : str ) -> Tuple:
"""simple docstring"""
lowercase : Tuple = trainer.lr_schedulers[0]['''scheduler''']
lowercase : List[str] = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __a ( self : int , _A : pl.Trainer , _A : pl.LightningModule ) -> int:
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
lowercase : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_A , str(metrics[key] ) ) )
def __a ( self : List[Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Tuple:
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
lowercase : Optional[int] = trainer.callback_metrics
# Log and save results to file
lowercase : Dict = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(_A , '''w''' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_A , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(_A , str(metrics[key] ) ) )
def snake_case( __magic_name__ , __magic_name__ ) -> None:
'''simple docstring'''
parser.add_argument(
'''--output_dir''' , default=str(Path(__magic_name__ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=__magic_name__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=__magic_name__ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=__magic_name__ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=__magic_name__ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=__magic_name__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=__magic_name__ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(__magic_name__ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=__magic_name__ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=True , __magic_name__=[] , __magic_name__=None , __magic_name__=None , **__magic_name__ , ) -> Optional[int]:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
lowercase : List[str] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=__magic_name__ )
# add custom checkpoints
if checkpoint_callback is None:
lowercase : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(__magic_name__ )
if logging_callback is None:
lowercase : List[Any] = LoggingCallback()
lowercase : int = {}
if args.fpaa:
lowercase : List[str] = 16
if args.gpus > 1:
lowercase : Optional[int] = '''auto'''
lowercase : Tuple = '''ddp'''
lowercase : Any = args.accumulate_grad_batches
lowercase : Tuple = None
lowercase : str = '''auto'''
lowercase : str = pl.Trainer.from_argparse_args(
__magic_name__ , weights_summary=__magic_name__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__magic_name__ , val_check_interval=1 , num_sanity_val_steps=2 , **__magic_name__ , )
if args.do_train:
trainer.fit(__magic_name__ )
else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
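# Usage sketch (added; hedged): this listing appears to mirror the upstream
# `lightning_base.py` from the transformers pytorch-lightning examples, where
# the module class above is called BaseTransformer and the two module-level
# functions (both shown as `snake_case` here) are add_generic_args and
# generic_train. A task script would typically do something like:
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     parser = MyTaskModule.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModule(args, mode="sequence-classification", num_labels=2)
#     trainer = generic_train(model, args)
#
# MyTaskModule is a hypothetical subclass that implements get_dataloader().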
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # route through joblib when a backend is registered, else use a Pool
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    # register backend_name as the joblib backend for the duration of the context
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
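# Usage sketch (added; hedged): inside the context manager above, map work is
# routed through joblib instead of the multiprocessing Pool, e.g.
#
#     with parallel_backend("spark"):  # requires `joblibspark` to be installed
#         mapped = parallel_map(fn, data, num_proc, types, False, None, single_fn)
#
# where `single_fn` is the per-slice worker (in `datasets` this is
# `_single_map_nested`). Outside the context, ParallelBackendConfig.backend_name
# is None and the Pool-based path is used.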
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
modified_files = (
    subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
)
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
snake_case__ = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __magic_name__ (snake_case_ ,snake_case_ ,snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Dict = StableDiffusionLatentUpscalePipeline
__lowercase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
__lowercase : List[Any] = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
__lowercase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowercase : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowercase : List[Any] = frozenset([] )
__lowercase : Any = True
@property
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = 1
snake_case__ = 4
snake_case__ = (16, 16)
snake_case__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
torch.manual_seed(0 )
snake_case__ = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_a , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_a , only_cross_attention=_a , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
snake_case__ = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
snake_case__ = EulerDiscreteScheduler(prediction_type='''sample''' )
snake_case__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''quick_gelu''' , projection_dim=5_12 , )
snake_case__ = CLIPTextModel(_a )
snake_case__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
snake_case__ = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Optional[Any] , _a:List[str]=0 ):
if str(_a ).startswith('''mps''' ):
snake_case__ = torch.manual_seed(_a )
else:
snake_case__ = torch.Generator(device=_a ).manual_seed(_a )
snake_case__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = '''cpu'''
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = pipe(**_a ).images
snake_case__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 2_56, 2_56, 3) )
snake_case__ = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
snake_case__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a , 1e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
super().test_save_load_local(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:str ):
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = [
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
snake_case__ = self.get_dummy_components()
snake_case__ = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
snake_case__ = self.get_dummy_inputs(_a )
snake_case__ = 2
snake_case__ = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers in this skip list have no sigma schedule and are
                # not supported by the latent upscaler, so they are skipped
continue
snake_case__ = getattr(_a , scheduler_enum.name )
snake_case__ = scheduler_cls.from_config(pipe.scheduler.config )
snake_case__ = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self:str ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
snake_case__ = pipe(_a , generator=_a , output_type='''latent''' ).images
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = torch.manual_seed(33 )
snake_case__ = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
snake_case__ = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
snake_case__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
snake_case__ = upscaler(
prompt=_a , image=_a , num_inference_steps=20 , guidance_scale=0 , generator=_a , output_type='''np''' , ).images[0]
snake_case__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    # convert a (possibly non-minimal) roman numeral string into an integer
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    # generate the minimal roman numeral form of an integer
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    # for each numeral in the input file, count the characters saved by
    # rewriting it in minimal form, and return the total savings
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
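    # Quick hand-checked examples (added): "XIX" parses as 10 - 1 + 10 = 19,
    # and 49 regenerates as the minimal form "XLIX".
    assert parse_roman_numerals("XIX") == 19
    assert generate_roman_numerals(49) == "XLIX"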
print(F'''{solution() = }''')
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
    args = parser.parse_args()
    logger.info(F'''Loading data from {args.data_file}''')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
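# Added sketch (hedged; not part of the dump script above): one common
# downstream use of such token counts is smoothing them into an MLM masking
# distribution with a temperature-like exponent, XLM-style. `_alpha` is a
# hypothetical value, and the toy counts below stand in for the real dump.
import numpy as np

_toy_counts = np.maximum(np.array([5, 0, 120, 3]), 1)  # clamp zero counts to 1
_alpha = 0.7
_token_probs = _toy_counts.astype(np.float64) ** -_alpha
_token_probs /= _token_probs.sum()  # frequent tokens end up masked less often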
'''simple docstring'''
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f'{self.__class__.__name__}({self.name}, {self.val})'

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    # min-heap over Node objects keyed by Node.val, with an index map that
    # makes decrease_key run in O(log n)
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
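# Note on the demo above (added): after decrease_key, Node(B, -17) has bubbled
# past Node(R, -1) to the root, so it is printed first in the "After" listing.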
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase: List[str] = logging.get_logger()
def _lowercase( __a : int , __a : str , __a : LevitConfig , __a : Path , __a : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
a__ =timm.create_model('levit_128s' , pretrained=__a )
else:
a__ =timm.create_model('levit_128' , pretrained=__a )
if hidden_sizes == 192:
a__ =timm.create_model('levit_192' , pretrained=__a )
if hidden_sizes == 256:
a__ =timm.create_model('levit_256' , pretrained=__a )
if hidden_sizes == 384:
a__ =timm.create_model('levit_384' , pretrained=__a )
from_model.eval()
a__ =LevitForImageClassificationWithTeacher(__a ).eval()
a__ =OrderedDict()
a__ =from_model.state_dict()
a__ =list(from_model.state_dict().keys() )
a__ =list(our_model.state_dict().keys() )
print(len(__a ) , len(__a ) )
for i in range(len(__a ) ):
a__ =weights[og_keys[i]]
our_model.load_state_dict(__a )
a__ =torch.randn((2, 3, 224, 224) )
a__ =from_model(__a )
a__ =our_model(__a ).logits
assert torch.allclose(__a , __a ), "The model logits don't match the original one."
a__ =name
print(__a )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
a__ =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def _lowercase( __a : Path , __a : str = None , __a : bool = True ):
a__ ='imagenet-1k-id2label.json'
a__ =1000
a__ =(1, num_labels)
a__ ='huggingface/label-files'
a__ =num_labels
a__ =json.load(open(hf_hub_download(__a , __a , repo_type='dataset' ) , 'r' ) )
a__ ={int(__a ): v for k, v in idalabel.items()}
a__ =idalabel
a__ ={v: k for k, v in idalabel.items()}
a__ =partial(__a , num_labels=__a , idalabel=__a , labelaid=__a )
a__ ={
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
a__ ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __a , names_to_config[model_name] , __a , __a )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __a , __a , __a , __a )
return config, expected_shape
if __name__ == "__main__":
_lowerCAmelCase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_lowerCAmelCase: Union[str, Any] = parser.parse_args()
_lowerCAmelCase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = SamImageProcessor()
__SCREAMING_SNAKE_CASE : Union[str, Any] = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def a_ ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def a_ ( self ):
shutil.rmtree(self.tmpdirname )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : Any = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Tuple = SamProcessor(image_processor=a__ )
__SCREAMING_SNAKE_CASE : int = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Dict = image_processor(a__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE : List[Any] = processor(images=a__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = SamProcessor(image_processor=a__ )
__SCREAMING_SNAKE_CASE : int = [torch.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE : str = [[1764, 2646]]
__SCREAMING_SNAKE_CASE : Optional[Any] = [[683, 1024]]
__SCREAMING_SNAKE_CASE : Optional[int] = processor.post_process_masks(a__ , a__ , a__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE : int = processor.post_process_masks(
a__ , torch.tensor(a__ ) , torch.tensor(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__SCREAMING_SNAKE_CASE : int = [np.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(a__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
@require_vision
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : int = SamImageProcessor()
__SCREAMING_SNAKE_CASE : Tuple = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def a_ ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def a_ ( self ):
shutil.rmtree(self.tmpdirname )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : int = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : int = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[Any] = SamProcessor(image_processor=a__ )
__SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(a__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE : Optional[int] = processor(images=a__ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[Any] = SamProcessor(image_processor=a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [tf.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [[1764, 2646]]
__SCREAMING_SNAKE_CASE : Tuple = [[683, 1024]]
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor.post_process_masks(a__ , a__ , a__ , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE : Dict = processor.post_process_masks(
a__ , tf.convert_to_tensor(a__ ) , tf.convert_to_tensor(a__ ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
__SCREAMING_SNAKE_CASE : int = [np.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE : List[Any] = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE : Tuple = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__SCREAMING_SNAKE_CASE : str = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors="tf" )
@require_vision
@require_torchvision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[str] = SamImageProcessor()
__SCREAMING_SNAKE_CASE : List[str] = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def a_ ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def a_ ( self ):
shutil.rmtree(self.tmpdirname )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : Optional[Any] = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : int = SamProcessor(image_processor=a__ )
__SCREAMING_SNAKE_CASE : int = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__SCREAMING_SNAKE_CASE : Optional[int] = [tf.convert_to_tensor(a__ )]
__SCREAMING_SNAKE_CASE : Optional[Any] = [torch.tensor(a__ )]
__SCREAMING_SNAKE_CASE : int = [[1764, 2646]]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [[683, 1024]]
__SCREAMING_SNAKE_CASE : Tuple = processor.post_process_masks(
a__ , a__ , a__ , return_tensors="tf" )
__SCREAMING_SNAKE_CASE : Any = processor.post_process_masks(
a__ , a__ , a__ , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Tuple = SamProcessor(image_processor=a__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Any = image_processor(a__ , return_tensors="pt" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE : Dict = processor(images=a__ , return_tensors="pt" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE : List[Any] = image_processor(a__ , return_tensors="tf" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE : str = processor(images=a__ , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
| 721
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    # probe the index suggested by linear interpolation between the endpoints
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # recursive variant; `left` and `right` bound the current search window
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be sorted in ascending order")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # the demo collection is defined outside the debug block so the search
    # below always has data to run on
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at position: {result}")
    else:
        print("Not found")
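# Worked probe (added): searching 67 in [10, 30, 40, 45, 50, 66, 77, 93] first
# computes point = 0 + ((67 - 10) * (7 - 0)) // (93 - 10) = 399 // 83 = 4,
# finds collection[4] = 50 < 67, and narrows to the right of index 4; since 67
# is absent, the loop eventually returns None and the demo prints "Not found".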
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
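# A stripped-down sketch (added; hedged) of what _LazyModule does: attribute
# access triggers the real submodule import on first use. The real class also
# handles backend errors, __dir__ and pickling; this toy version does not, and
# it only works when instantiated for an actual package.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # e.g. _TinyLazyModule("pkg", {"sub": ["Thing"]}).Thing imports pkg.sub
        module = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        return getattr(module, attr)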
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
@register_to_config
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = False , ):
'''simple docstring'''
super().__init__()
_UpperCamelCase : Optional[Any] = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = nn.Embedding(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase : Dict = False
_UpperCamelCase : Dict = nn.Dropout(p=lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = TaConfig(
vocab_size=lowerCAmelCase__ , d_model=lowerCAmelCase__ , num_heads=lowerCAmelCase__ , d_kv=lowerCAmelCase__ , d_ff=lowerCAmelCase__ , dropout_rate=lowerCAmelCase__ , feed_forward_proj=lowerCAmelCase__ , is_decoder=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , )
_UpperCamelCase : Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowerCAmelCase__ ):
_UpperCamelCase : Optional[Any] = TaBlock(lowerCAmelCase__ )
self.encoders.append(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = TaLayerNorm(lowerCAmelCase__ )
_UpperCamelCase : Optional[int] = nn.Dropout(p=lowerCAmelCase__ )
def lowercase_ (self , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.token_embedder(lowerCAmelCase__ )
_UpperCamelCase : Any = encoder_input_tokens.shape[1]
_UpperCamelCase : Optional[int] = torch.arange(lowerCAmelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase__ )
_UpperCamelCase : Optional[Any] = self.dropout_pre(lowerCAmelCase__ )
        # invert the attention mask
_UpperCamelCase : Tuple = encoder_input_tokens.size()
_UpperCamelCase : Dict = self.get_extended_attention_mask(lowerCAmelCase__ , lowerCAmelCase__ )
for lyr in self.encoders:
_UpperCamelCase : List[str] = lyr(lowerCAmelCase__ , lowerCAmelCase__ )[0]
_UpperCamelCase : List[str] = self.layer_norm(lowerCAmelCase__ )
return self.dropout_post(lowerCAmelCase__ ), encoder_inputs_mask
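# Shape flow through the encoder above (added note; B = batch, T = sequence
# length, d = d_model): encoder_input_tokens (B, T) -> token_embedder (B, T, d);
# position encodings for arange(T) are added in place; each T5 block preserves
# (B, T, d); the final layer norm and dropout keep (B, T, d), returned together
# with the extended attention mask built from encoder_inputs_mask.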
"""simple docstring"""
values = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
1_0: """a""",
1_1: """b""",
1_2: """c""",
1_3: """d""",
1_4: """e""",
1_5: """f""",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    # only whole-valued inputs are accepted
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
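    # Two quick hand-traced examples (added): 255 -> "0xff"; for -26, divmod
    # yields remainders 10 ('a') then 1, giving "-0x1a".
    assert decimal_to_hexadecimal(255) == "0xff"
    assert decimal_to_hexadecimal(-26) == "-0x1a"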
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # prepend in descending order so the list ends up ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
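# Expected output of the demo above (added): the two tuples merge into one
# ascending list:
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10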
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : List[Any] = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
snake_case__ : int = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
snake_case__ : Optional[int] = {
'jukebox': 5_1_2,
}
class _a ( A__ ):
"""simple docstring"""
snake_case =VOCAB_FILES_NAMES
snake_case =PRETRAINED_VOCAB_FILES_MAP
snake_case =PRETRAINED_LYRIC_TOKENS_SIZES
snake_case =["""input_ids""", """attention_mask"""]
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case=["v3", "v2", "v2"] , _snake_case=512 , _snake_case=5 , _snake_case="<|endoftext|>" , **_snake_case , ):
_UpperCAmelCase =AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else unk_token
super().__init__(
unk_token=_snake_case , n_genres=_snake_case , version=_snake_case , max_n_lyric_tokens=_snake_case , **_snake_case , )
_UpperCAmelCase =version
_UpperCAmelCase =max_n_lyric_tokens
_UpperCAmelCase =n_genres
with open(_snake_case , encoding="utf-8" ) as vocab_handle:
_UpperCAmelCase =json.load(_snake_case )
with open(_snake_case , encoding="utf-8" ) as vocab_handle:
_UpperCAmelCase =json.load(_snake_case )
with open(_snake_case , encoding="utf-8" ) as vocab_handle:
_UpperCAmelCase =json.load(_snake_case )
_UpperCAmelCase =R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the character vocabulary had n_vocab=80; v3 dropped the '+' character, leaving n_vocab=79.
if len(self.lyrics_encoder ) == 79:
_UpperCAmelCase =oov.replace(R"\-'" , R"\-+'" )
_UpperCAmelCase =regex.compile(_snake_case )
_UpperCAmelCase ={v: k for k, v in self.artists_encoder.items()}
_UpperCAmelCase ={v: k for k, v in self.genres_encoder.items()}
_UpperCAmelCase ={v: k for k, v in self.lyrics_encoder.items()}
@property
def SCREAMING_SNAKE_CASE ( self ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def SCREAMING_SNAKE_CASE ( self ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case ):
_UpperCAmelCase =[self.artists_encoder.get(_snake_case , 0 ) for artist in list_artists]
for genres in range(len(_snake_case ) ):
_UpperCAmelCase =[self.genres_encoder.get(_snake_case , 0 ) for genre in list_genres[genres]]
_UpperCAmelCase =list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_UpperCAmelCase =[[self.lyrics_encoder.get(_snake_case , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
    def _tokenize(self, lyrics):
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(R"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")
        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)
    def _normalize(self, text):
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
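    # e.g. _normalize("The Beatles!") -> "the_beatles" (illustrative)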
    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")
        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)
        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)
        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))
        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))
        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))
        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
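# Usage sketch (illustrative; the file paths and inputs are assumptions):
#   tokenizer = JukeboxTokenizer("artists.json", "genres.json", "lyrics.json")
#   encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")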
| 408
| 0
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "efficientnet"
    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2_560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-5
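# Usage sketch (illustrative):
#   onnx_config = EfficientNetOnnxConfig(EfficientNetConfig())
#   print(onnx_config.inputs)  # OrderedDict with a dynamic-axis "pixel_values" input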
| 199
|
def _SCREAMING_SNAKE_CASE ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
__A = set()
# Replace all the whitespace in our sentence
__A = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowercase ) == 2_6
def _SCREAMING_SNAKE_CASE ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
__A = [False] * 2_6
for char in input_str:
if char.islower():
__A = True
elif char.isupper():
__A = True
return all(__lowercase )
def _SCREAMING_SNAKE_CASE ( __lowercase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
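# The set-comprehension variant wins in the benchmark below: it makes a single
# pass with C-level set construction instead of per-character Python branching.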
def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""", setup=setup))
    print(timeit("""is_pangram_faster()""", setup=setup))
    print(timeit("""is_pangram_fastest()""", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 199
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = ["CLIPFeatureExtractor"]
__UpperCamelCase = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26
|
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError('''Input sequence should not be empty''')
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
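# Kadane's algorithm: `ans + num` extends the best subarray ending at i - 1 while
# `num` restarts at i; e.g. max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6.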
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("""Enter number of elements : """).strip())
    array = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 443
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, input_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''distilbert-base-uncased''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 7_68)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 229
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_squeezebert_fast"""] = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_squeezebert"""] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 229
| 1
|
def binomial_coefficient(n, k):
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count):
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    if n < 0:
        raise ValueError("""factorial() not defined for negative values""")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    return catalan_number(node_count) * factorial(node_count)
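# catalan(n) = C(2n, n) / (n + 1) counts binary search trees on n nodes;
# multiplying by n! counts labeled binary trees (all node orderings).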
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 21
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
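# _distribute_shards splits range(num_shards) into at most max_num_jobs contiguous,
# near-equal ranges; _split_gen_kwargs applies the same split to every list-valued
# shard kwarg, which must all agree in length (hence the RuntimeError case above).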
| 33
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['boxes'], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['pixel_values'] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                F' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
| 715
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
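# e.g. get_pairs(("h", "e", "l", "l", "o")) -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}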
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
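    # Note: bpe() repeatedly merges the lowest-ranked adjacent symbol pair until no
    # learned merge applies, and memoizes the result per input token in self.cache.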
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
def __UpperCamelCase ( self : Dict , UpperCAmelCase_ : "Conversation"):
UpperCamelCase__ : List[str] = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text)
else:
# Generated responses should contain them already.
inputs.append(UpperCAmelCase_)
UpperCamelCase__ : Optional[Any] = ' '.join(UpperCAmelCase_)
UpperCamelCase__ : int = self.encode(UpperCAmelCase_)
if len(UpperCAmelCase_) > self.model_max_length:
UpperCamelCase__ : Optional[Any] = input_ids[-self.model_max_length :]
logger.warning(F'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.')
return input_ids
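    # Usage sketch (illustrative; assumes local vocab.json/merges.txt files):
    #   tok = BlenderbotTokenizer('vocab.json', 'merges.txt')
    #   tok.tokenize(' Hello world')  # -> byte-level BPE tokens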
| 6
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ['gpt2']
TINY_MODEL_CHECKPOINT = 'gpt2'
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text'''),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['''input_ids'''].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['''logits''']
            return outputs
@require_tf
@require_keras_nlp
class TFGPTaTokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='''tf''')
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / '''saved.model'''
                tf.saved_model.save(model, save_path, signatures={'''serving_default''': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['''serving_default'''](test_inputs)['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 12_31_23
            for max_length in [3, 5, 10_24]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out['''input_ids'''].numpy().shape[1]
                assert out_length == max_length
| 49
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)
BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = 'bart'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=5_0265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''')
class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'''past_key_values.{i}.key'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[f'''past_key_values.{i}.value'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'''present.{i}.key'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[f'''present.{i}.value'''] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
            else:
                import torch
            batch, seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None, ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
| 678
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    '''simple docstring'''
    config = SwinvaConfig()
    name_split = swinva_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 2_18_41
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 10_00
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config

def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name

def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict

def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
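
# --- illustrative example (not part of the original script) -----------------
# `convert_state_dict` above splits timm's fused QKV projection into the
# separate query/key/value tensors the HF model expects. The slicing rule in
# isolation (shapes are made up for the demo):
def _demo_qkv_split():
    dim = 4
    fused_qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = fused_qkv[:dim, :]
    key = fused_qkv[dim : dim * 2, :]
    value = fused_qkv[-dim:, :]
    # the three slices partition the fused matrix exactly
    assert torch.equal(torch.cat([query, key, value], dim=0), fused_qkv)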
| 368
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the
        image processor, assuming `do_resize` with a scalar shortest edge.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
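
# --- illustrative example (not part of the original test file) --------------
# The shortest-edge rule encoded in `get_expected_values` above, as a
# stand-alone helper: the shorter side is scaled to `shortest_edge` and the
# longer side keeps the aspect ratio (the `longest_edge` cap is ignored here
# for brevity).
def _shortest_edge_resize_sketch(w, h, shortest_edge):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _shortest_edge_resize_sketch(30, 60, 18) == (36, 18)
assert _shortest_edge_resize_sketch(60, 30, 18) == (18, 36)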
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 368
| 1
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
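
# --- illustrative example (not part of the original __init__) ---------------
# What `_LazyModule` buys us, in miniature: submodules listed in
# `_import_structure` are only imported when one of their names is first
# accessed. This sketch mimics the idea and is NOT the transformers
# implementation:
#
#   import importlib
#   import types
#
#   class _LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)  # the heavy import happens only now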
| 87
|
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    >>> [factorial(i) for i in range(10)]
    [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880]
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: Number should not be negative.
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
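
# --- illustrative example (not part of the original file) -------------------
# Quick check that `@lru_cache` memoizes the recursion: the second call below
# is answered from the cache without recursing (`cache_info` is provided by
# functools.lru_cache).
def _demo_factorial_cache():
    factorial(10)
    hits_before = factorial.cache_info().hits
    factorial(10)  # served entirely from the cache
    assert factorial.cache_info().hits == hits_before + 1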
| 370
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 624
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
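
# --- illustrative example (not part of the original test file) --------------
# The spatial sizes asserted in `create_and_check_model` follow the standard
# convolution output formula used by CvT's convolutional token embeddings:
# out = floor((in + 2 * padding - kernel) / stride) + 1.
def _demo_cvt_feature_map_size():
    size = 64  # tester default image_size
    # tester defaults: kernels [7, 3, 3], strides [4, 2, 2], paddings [2, 1, 1]
    for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
        size = floor(((size + 2 * padding - kernel) / stride) + 1)
    assert size == 4  # 64 -> 16 -> 8 -> 4 across the three stages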
| 624
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
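
# --- illustrative example (not part of the original file) -------------------
# Round-tripping the config through a dict, relying only on standard
# PretrainedConfig behaviour (to_dict / from_dict):
def _demo_realm_config_roundtrip():
    config = RealmConfig(num_candidates=4)
    restored = RealmConfig.from_dict(config.to_dict())
    assert restored.num_candidates == 4
    assert restored.reader_beam_size == config.reader_beam_size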
| 77
|
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def __lowercase ( self : Any ):
lowerCAmelCase = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
lowerCAmelCase = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=lowerCAmelCase , )
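
# --- illustrative example (not part of the original test file) --------------
# The "<0xC3>", "<0xA9>" pieces asserted above are SentencePiece byte
# fallback: "é" has no vocabulary entry, so it is emitted as its raw UTF-8
# bytes and reassembled on decoding. The byte-level fact in plain Python:
def _demo_byte_fallback():
    assert "é".encode("utf-8") == b"\xc3\xa9"
    assert bytes([0xC3, 0xA9]).decode("utf-8") == "é"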
| 169
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple]] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
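
# --- illustrative example (not part of the original file) -------------------
# MobileViT checkpoints expect BGR inputs, hence `do_flip_channel_order`
# above. The flip is just a channel-axis reversal; a minimal numpy sketch for
# channels-first arrays:
def _demo_flip_channel_order():
    rgb = np.stack([np.full((2, 2), c) for c in (1, 2, 3)])  # channels 1, 2, 3
    bgr = rgb[::-1, :, :]
    assert (bgr[0] == 3).all() and (bgr[2] == 1).all()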
| 720
|
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
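
# --- illustrative example (not part of the original script) -----------------
# `make_linear_from_emb` above ties the LM head to the input embeddings by
# sharing the underlying storage rather than copying it. The sharing in
# miniature:
def _demo_weight_tying():
    emb = nn.Embedding(10, 4)
    vocab_size, emb_size = emb.weight.shape
    lm_head = nn.Linear(emb_size, vocab_size, bias=False)  # weight shape (vocab_size, emb_size)
    lm_head.weight.data = emb.weight.data
    assert lm_head.weight.data_ptr() == emb.weight.data_ptr()  # same tensor storage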
| 615
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
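
# --- illustrative example (not part of the original file) -------------------
# How this scheduler is driven in practice, paraphrasing the companion
# KarrasVePipeline loop (Algorithm 2 in Karras et al., 2022). `model` stands
# in for a trained denoiser and is not defined here, so the loop is sketched
# in comments rather than executable code; the exact scaling of the model
# input/output is an assumption based on that pipeline:
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = randn_tensor(shape, generator=generator) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       # 1. stochastically increase noise, 2. predict, 3. Euler step
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#       model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:
#           # 4. second-order correction (`step_correct`)
#           model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
#           step_output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               step_output.prev_sample, step_output.derivative,
#           )
#       sample = step_output.prev_sample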
| 22
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively via Jacobi iteration, starting from `init_val`."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if the coefficient part of the augmented matrix is not strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
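
# --- illustrative example (not part of the original file) -------------------
# Worked example on a strictly diagonally dominant 3x3 system; with enough
# iterations the Jacobi estimates converge to the exact solution (numbers
# chosen for the demo).
def _demo_jacobi():
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    approx = jacobi_iteration_method(coefficient, constant, init_val=[0.0, 0.0, 0.0], iterations=100)
    exact = np.linalg.solve(coefficient, constant).ravel()
    assert np.allclose(approx, exact, atol=1e-8)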
| 680
| 0
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
__A : CLIPConfig
__A : jnp.dtype = jnp.floataa
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = FlaxCLIPVisionModule(self.config.vision_config )
lowerCamelCase : Union[str, Any] = nn.Dense(self.config.projection_dim , use_bias=__A , dtype=self.dtype )
lowerCamelCase : Optional[Any] = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
lowerCamelCase : Union[str, Any] = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
lowerCamelCase : List[Any] = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
lowerCamelCase : str = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
    def __call__( self , clip_input ):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker( FlaxPreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config , input_shape = None , seed = 0 , dtype = jnp.float32 , _do_init = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights( self , rng , input_shape , params = None ):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs , clip_input )["params"]
        return random_params
    def __call__( self , clip_input , params = None , ):
        """simple docstring"""
        # incoming images are channels-first; the Flax CLIP encoder expects channels-last
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {"params": params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 717
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_snake_case = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _snake_case['''feature_extraction_beit'''] = ['''BeitFeatureExtractor''']
    _snake_case['''image_processing_beit'''] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _snake_case['''modeling_beit'''] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _snake_case['''modeling_flax_beit'''] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _snake_case, module_spec=__spec__)
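# Illustrative note (not executed here): once the import table above is
# installed via sys.modules, heavy submodules load only on first attribute
# access. Under that lazy-loading contract:
#
#   from transformers import BeitConfig   # touches only configuration_beit
#   from transformers import BeitModel    # triggers the torch-backed modeling_beit import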
| 231
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class Direction ( enum.Enum ):
    '''simple docstring'''
    UP = 0
    DOWN = 1
def forceWrite ( content , end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor ( content , color , end="" ):
    forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , end )
def reset_cursor ( ):
    forceWrite('''\r''' )
def move_cursor ( num_lines , direction ):
    forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def clear_line ( ):
    forceWrite(''' ''' * TERMINAL_WIDTH )
    reset_cursor()
def linebreak ( ):
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH )
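# Tiny demo of the helpers above; ANSI escape handling is assumed to be
# available in the hosting terminal (color code 32 is green).
if __name__ == "__main__":
    writeColor("status line" , 32 )
    forceWrite("\n" )
    linebreak()
    forceWrite("\n" )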
| 117
|
class Graph :
    def __init__( self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex ( self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge ( self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight ( self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        """simple docstring"""
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('''\n''' )
    def get_edges ( self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices ( self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build ( vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
    def __init__( self ):
        """simple docstring"""
        self.parent = {}
        self.rank = {}
    def __len__( self ):
        """simple docstring"""
        return len(self.parent )
    def make_set ( self , item ):
        """simple docstring"""
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find ( self , item ):
        """simple docstring"""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union ( self , item1 , item2 ):
        """simple docstring"""
        root1 = self.find(item1 )
        root2 = self.find(item2 )
        if root1 == root2:
            return root1
        if self.rank[root1] > self.rank[root2]:
            self.parent[root2] = root1
            return root1
        if self.rank[root1] < self.rank[root2]:
            self.parent[root1] = root2
            return root2
        if self.rank[root1] == self.rank[root2]:
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1
        return None
    @staticmethod
    def boruvka ( graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
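# Hedged usage sketch for the classes above (edge weights are illustrative;
# boruvka is the static method defined on UnionFind in this file):
if __name__ == "__main__":
    example = Graph.build(edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3]] )
    minimum_spanning_tree = UnionFind.boruvka(example )
    # With distinct weights 1 < 2 < 3 the MST keeps a-b and b-c and drops a-c
    print(minimum_spanning_tree )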
| 691
| 0
|
"""simple docstring"""
from typing import Any
def viterbi(observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> list:
    """simple docstring"""
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''''''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''''''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty(observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('''There\'s an empty parameter''' )
def _validate_lists(observations_space , states_space ) -> None:
    """simple docstring"""
    _validate_list(observations_space , '''observations_space''' )
    _validate_list(states_space , '''states_space''' )
def _validate_list(_object , var_name ) -> None:
    """simple docstring"""
    if not isinstance(_object , list ):
        msg = f'{var_name} must be a list'
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = f'{var_name} must be a list of strings'
                raise ValueError(msg )
def _validate_dicts(initial_probabilities , transition_probabilities , emission_probabilities , ) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities , '''initial_probabilities''' , float )
    _validate_nested_dict(transition_probabilities , '''transition_probabilities''' )
    _validate_nested_dict(emission_probabilities , '''emission_probabilities''' )
def _validate_nested_dict(_object , var_name ) -> None:
    """simple docstring"""
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict(_object , var_name , value_type , nested = False ) -> None:
    """simple docstring"""
    if not isinstance(_object , dict ):
        msg = f'{var_name} must be a dict'
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = f'{var_name} all keys must be strings'
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = '''nested dictionary ''' if nested else ''''''
        msg = f'{var_name} {nested_text}all values must be {value_type.__name__}'
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
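    # Hedged worked example: the classic healthy/fever toy HMM (the numbers
    # are the standard textbook values, included here for illustration).
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # -> ['Healthy', 'Healthy', 'Fever']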
| 712
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1)
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37)
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types( self ):
        '''simple docstring'''
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs)
    def test_falcon_sequence_classification_model( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''single_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_rw_cache_conversion( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict['''input_ids''']
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache , batch_size)
        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = '''multi_label_classification'''
        input_ids = input_dict['''input_ids''']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format( self ):
        '''simple docstring'''
        for model_class in self.all_generative_model_classes:
            config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , '''use_cache'''):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs['''use_cache'''] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config , '''decoder_layers''' , None)
                or getattr(config , '''num_decoder_layers''' , None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , '''num_kv_heads''' , config.num_attention_heads)
            embed_dim = getattr(config , '''d_model''' , config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['''past_key_values''']
            self.assertEqual(len(past_kv) , num_hidden_layers)
            batch_size , seq_length = inputs['''input_ids'''].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]) , 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_lm_generate_falcon( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''')
        model = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''')
        model.eval()
        model.to(torch_device)
        inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(torch_device)
        EXPECTED_OUTPUT = (
            '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
        )
        output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str , EXPECTED_OUTPUT)
@slow
    def test_lm_generation_big_models( self ):
        '''simple docstring'''
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs , do_sample=False , max_new_tokens=4)
            model.generate(**inputs , do_sample=True , max_new_tokens=4)
            model.generate(**inputs , num_beams=2 , max_new_tokens=4)
model.generate(**lowercase_ , num_beams=2 , max_new_tokens=4)
@slow
    def test_lm_generation_use_cache( self ):
        '''simple docstring'''
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=False)
                outputs_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=True)
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
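# Standalone sketch (not part of the test suite) of the invariant the last
# test asserts: greedy decoding must yield identical token ids with and
# without the KV cache, since caching only reuses attention states that were
# already computed. The repo id reuses one from the tests above; treat the
# snippet as illustrative.
#
#   tok = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
#   model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b").eval()
#   enc = tok("My favorite food is", return_tensors="pt")
#   with_cache = model.generate(**enc, do_sample=False, max_new_tokens=8, use_cache=True)
#   without_cache = model.generate(**enc, do_sample=False, max_new_tokens=8, use_cache=False)
#   assert torch.equal(with_cache, without_cache)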
| 176
| 0
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ):
    '''simple docstring'''
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
SCREAMING_SNAKE_CASE__ = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand( BaseTransformersCLICommand ):
    @staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
        train_parser.add_argument("""--model_type""" , type=str , required=True , help="""Model's type.""" )
        train_parser.add_argument(
            """--tf_checkpoint""" , type=str , required=True , help="""TensorFlow checkpoint path or folder.""" )
        train_parser.add_argument(
            """--pytorch_dump_output""" , type=str , required=True , help="""Path to the PyTorch saved model output.""" )
        train_parser.add_argument("""--config""" , type=str , default="""""" , help="""Configuration file path or folder.""" )
        train_parser.add_argument(
            """--finetuning_task_name""" , type=str , default=None , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger("""transformers-cli/converting""" )
        self._logger.info(f'Loading model {model_type}' )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
lowerCAmelCase = self._tf_checkpoint
lowerCAmelCase = """"""
else:
lowerCAmelCase = self._tf_checkpoint
lowerCAmelCase = """"""
convert_transfo_xl_checkpoint_to_pytorch(
lowercase , self._config , self._pytorch_dump_output , lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                """--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]""" )
| 532
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1e-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = SwinvaForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
    pipeline_model_mapping = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = SwinvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37 )
    def test_config( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" )
    def test_multi_gpu_data_parallel_forward( self ):
pass
@unittest.skip(reason="""Swinv2 does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = len(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            if hasattr(self.model_tester , """num_hidden_states_types""" ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class SwinvaModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
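# Worked example (using the tester defaults above) of the sequence-length
# arithmetic in create_and_check_model: image_size=32 and patch_size=2 give
# (32 // 2) ** 2 = 256 patches, and each of the len(depths) - 1 = 2 patch-merge
# stages shrinks the token count 4x while doubling the channel dimension:
#
#   expected_seq_len = 256 // (4 ** 2)   # = 16 tokens
#   expected_dim = int(16 * 2 ** 2)      # embed_dim 16 -> 64 channels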
| 532
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester :
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFDebertaVaForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] ):
'''simple docstring'''
a__ = TFDebertaVaForQuestionAnswering(config=__UpperCamelCase )
a__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a__ = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
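
# Illustration of the expected-slice idiom used in the integration test above
# (a minimal, self-contained sketch; the tensor values here are made up, only
# the comparison pattern is real):
#
#   output = tf.constant([[[0.2356, 0.1948], [-0.1063, 0.3586]]])
#   expected = tf.constant([[[0.2356, 0.1948], [-0.1063, 0.3586]]])
#   tf.debugging.assert_near(output, expected, atol=1e-4)  # passes silently
#
# assert_near raises InvalidArgumentError if any element differs by more than
# atol, which is why a tiny hard-coded corner of the output is enough to catch
# numerical regressions in the whole model.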
| 719
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert a row-major data table into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]; weight 0 inverts the score, weight 1 keeps it."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one aggregate score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]

    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """
    weights - int list
    possible values - 0 / 1
    0 if lower values have higher weight in the data set
    1 if higher values have higher weight in the data set
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
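

if __name__ == "__main__":
    # A minimal usage sketch (not in the original file); the vehicle rows and
    # the [0, 0, 1] weight vector are made up for illustration: minimize the
    # first two columns (price, mileage), maximize the third (model year).
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 0, 1]))
    # each row gains its aggregate score, e.g. the first becomes [20, 60, 2012, 2.0]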
| 289
| 0
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def test_send_file(file: Mock, sock: Mock) -> None:
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 676
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
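
# A minimal usage sketch (not part of the original module); it assumes network
# access and the publicly hosted "google/ddpm-cifar10-32" checkpoint:
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   image.save("ddim_sample.png")
#
# eta=0.0 makes the DDIM update deterministic, which is why far fewer steps
# than the 1000 steps used for DDPM training still yield usable samples.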
| 676
| 1
|
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """
    A few doctests (reconstructed; the original docstring was stripped from
    this dump):

    >>> new_list = LinkedList()
    >>> new_list.is_empty()
    True
    >>> new_list.insert(10)
    >>> new_list.get_head_data()
    10
    >>> new_list.insert(20)
    >>> new_list.get_tail_data()
    20
    >>> 10 in new_list
    True
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 714
|
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting characters in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of the child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #       max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
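
# A tiny sanity sketch for the fitness function above (not in the original):
# "abcd" matches the target "abce" at 3 of 4 positions, so
#   evaluate("abcd", "abce") == ("abcd", 3.0)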
| 478
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
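
# Sketch of what the lazy structure above buys (an assumption-laden note, not
# part of the original file): the heavy modeling modules are imported only when
# one of their symbols is first accessed, e.g.
#   from transformers.models.encoder_decoder import EncoderDecoderConfig
# pulls in just the configuration module, not the torch/TF/flax model code.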
| 123
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
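
# Minimal usage sketch (not in the original; assumes network access to the
# "unc-nlp/lxmert-base-uncased" checkpoint referenced above):
#   tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   tok("a visual question")["input_ids"]  # -> [CLS] id, word-piece ids, [SEP] id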
| 123
| 1
|
'''simple docstring'''
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 9
|
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
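
# For illustration (not in the original): for backend "torch" and a CamelCase
# object name such as "UNet2DModel" (neither all-upper nor all-lower),
# create_dummy_object fills DUMMY_CLASS, producing a stub like:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#
# so importing the stub without torch installed only raises when it is used.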
| 9
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62
|
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor) for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    """Projects a sinusoidal timestep embedding through a two-layer MLP with SiLU."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    """Wraps get_sinusoidal_embeddings as a (parameter-free) Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
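
# A tiny numeric sketch (not in the original): with the defaults above and
# embedding_dim=4, inv_timescales is [1, 1e-4], so timestep 0 embeds to
# [sin 0, sin 0, cos 0, cos 0] = [0., 0., 1., 1.]:
#   get_sinusoidal_embeddings(jnp.array([0, 1]), embedding_dim=4)[0]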
| 62
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
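
# Minimal usage sketch (not in the original; assumes the public
# "openai/clip-vit-base-patch32" checkpoint and a PIL image bound to `image`):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   sorted(batch.keys())  # ['attention_mask', 'input_ids', 'pixel_values']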
| 591
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
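
# Worked check of the property above with the default conv_stride (not in the
# original): math.prod((5, 2, 2, 2, 2, 2, 2)) == 320, i.e. one encoder frame
# per 320 waveform samples, or 20 ms of 16 kHz audio.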
| 591
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 24
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
#    SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname: str, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
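
# For illustration (not in the original): _re_identifier keys each mapping
# entry by its first quoted string, so a block such as
#     ("albert", "AlbertConfig"),
# sorts under "albert", keeping the OrderedDict literals alphabetized.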
| 24
| 1
|
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below the given ratio.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
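
# Worked check of is_prime above (not in the original): 97 is not divisible by
# 2 or 3, and the 6k±1 loop only has to test 5 and 7 up to sqrt(97) ≈ 9.8, so
# is_prime(97) returns True.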
| 713
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Warp the image with the affine transform that maps pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # (the source/destination pairings below are a plausible reconstruction;
    # the obfuscated dump did not preserve which array fed which call)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
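
# Note on the API used above (not in the original): cv2.getAffineTransform
# returns the unique 2x3 matrix mapping the three source points exactly onto
# the three destination points; warpAffine then applies it to every pixel, so
# identical source and destination triangles return the image unchanged.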
| 96
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 262
|
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
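
# Worked check of the property above with made-up settings (not in the
# original): one categorical feature of cardinality 10 gives
# embedding_dimension [(10 + 1) // 2] = [5]; with input_size=1,
# num_time_features=2 and no other features, _number_of_features
# = 5 + 0 + 2 + 0 + 1 * 2 = 9.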
| 262
| 1
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    "Context manager to hide the terminal cursor and restore it on exit"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
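
# Minimal usage sketch (not in the original; the context-manager name `hide`
# is itself a reconstruction, the identifier was stripped in this dump):
#   with hide():
#       input("the cursor stays hidden while this prompt is shown")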
| 296
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__lowercase = logging.getLogger()
def lowerCAmelCase (__UpperCamelCase : Path , __UpperCamelCase : list ):
"""simple docstring"""
__UpperCamelCase ='''\n'''.join(__UpperCamelCase )
Path(__UpperCamelCase ).open('''w''' ).writelines(__UpperCamelCase )
__lowercase = '''patrickvonplaten/t5-tiny-random'''
__lowercase = '''sshleifer/bart-tiny-random'''
__lowercase = '''sshleifer/tiny-mbart'''
__lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowercase ( __a ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
__UpperCamelCase =input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
__UpperCamelCase =[''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(UpperCamelCase__ , UpperCamelCase__ )
__UpperCamelCase =str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
__UpperCamelCase ='''translation_en_to_de''' if model == T5_TINY else '''summarization'''
__UpperCamelCase =f"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(UpperCamelCase__ , '''argv''' , UpperCamelCase__ ):
run_generate()
assert Path(UpperCamelCase__ ).exists()
# os.remove(Path(output_file_name))
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
self.run_eval_tester(UpperCamelCase__ )
    # any extra models should go into this list - they are tested as @slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing both translation (t5) and summarization (mbart) search paths
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
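
# A hedged sketch (not part of the original tests) of what the --search grammar
# used above expands to: each clause is `name=v1:v2:...`, and the script scores
# the cartesian product of all clauses. `_expand_search` is a hypothetical
# helper written here only to illustrate that semantics.
from itertools import product


def _expand_search(spec: str) -> list:
    axes = {}
    for clause in spec.split():
        name, values = clause.split("=")
        axes[name] = values.split(":")
    return [dict(zip(axes, combo)) for combo in product(*axes.values())]


# _expand_search("num_beams=1:2 length_penalty=0.9:1.0") yields 4 combinations:
# [{'num_beams': '1', 'length_penalty': '0.9'}, {'num_beams': '1', 'length_penalty': '1.0'},
#  {'num_beams': '2', 'length_penalty': '0.9'}, {'num_beams': '2', 'length_penalty': '1.0'}]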
| 296
| 1
|
"""Pure-Python implementation of the SHA-1 hash, verified against hashlib."""
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Contains the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the input to a multiple of 64 bytes, appending the bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Return the padded data as a list of 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into eighty 32-bit words."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        """Run the compression function over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    data = b"Test String"
    assert SHA1Hash(data).final_hash() == hashlib.sha1(data).hexdigest()  # noqa: S324


def main():
    """Take a --string or --file input and print its SHA-1 hash."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
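
# A minimal usage sketch (same check as test_sha1_hash): the pure-Python
# implementation agrees with hashlib on any bytestring.
#
#     SHA1Hash(b"The quick brown fox jumps over the lazy dog").final_hash()
#     # -> '2fd4e1c67a2d28fced849ee1bb76e7391b93eb12'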
| 33
|
"""Tests for UniPCMultistepScheduler."""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # only build a default scheduler when none was passed in, so tests like
        # test_switch can drive the loop with a pre-configured instance
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
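
# A hedged sketch of the denoising loop these tests exercise, outside the test
# harness. `eps_model` is a hypothetical stand-in for any network that predicts
# noise from (sample, timestep); the scheduler calls are the real diffusers
# APIs used above.
def _unipc_denoise_sketch(eps_model, sample, num_inference_steps=10):
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = eps_model(sample, t)  # predicted noise at this timestep
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample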
| 314
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get images list and annotations list from the input dir, build mosaic
    images and updated annotations, and save both to the output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    """Read YOLO-style label files and return image paths plus corner-format boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
):
    """Stitch four images into one mosaic and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random lowercase/digit string of the requested length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 390
|
lowercase_ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
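
# The guard pattern used throughout this module, in one hedged sketch
# (`is_my_backend_available` and `dummy_my_backend_objects` are hypothetical
# stand-ins, not real diffusers symbols):
#
#     try:
#         if not is_my_backend_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_my_backend_objects import *  # noqa: F403  (stubs that raise on use)
#     else:
#         from .pipelines import MyBackendPipeline
#
# The dummy modules export placeholder classes under the same names, so user
# code can import them unconditionally and only fails when they are used.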
| 390
| 1
|
"""Tests for the Stable unCLIP image-to-image pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)

        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                num_hidden_layers=5,
                num_attention_heads=4,
                image_size=32,
                intermediate_size=37,
                patch_size=1,
            )
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.0_0085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }

        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            init_image,
            "anime turtle",
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
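
# The two memory savers toggled in the slow tests above, in isolation; `pipe`
# stands for any loaded DiffusionPipeline (a usage sketch, not a new API):
#
#     pipe.enable_attention_slicing()        # slice attention to cut peak VRAM
#     pipe.enable_sequential_cpu_offload()   # keep weights on CPU, stream each module to GPU on demand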
| 5
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`.
    This pipeline predicts the class of a video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        """Assign labels to the video(s) passed as inputs."""
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 606
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_A : Any = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[int] = ['''ConvNextFeatureExtractor''']
_A : List[str] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Dict = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 709
|
"""Sieve of Eratosthenes."""


def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return every prime number up to and including `num`.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(2)
    [2]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
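
# Cost sketch: the inner loop strikes out roughly num/p multiples for each
# prime p <= sqrt(num), so the total work is num * sum(1/p), i.e. O(num log log num).
# prime_sieve_eratosthenes(30) -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]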
| 330
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCAmelCase__ : Dict ={'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
lowerCAmelCase__ : List[str] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 101
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 253
| 0
|
"""Two-pointer technique on a sorted array."""
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a sorted array of integers, return the indices of the two numbers
    that add up to `target`, or an empty list if no such pair exists.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 3)
    []
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 701
|
"""Deformable DETR model configuration."""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a Deformable DETR model.
    """

    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding any nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
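
# A minimal round-trip sketch of the config above (the defaults are the ones
# reconstructed here; no claims about any released checkpoint):
#
#     config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#     restored = DeformableDetrConfig.from_dict(config.to_dict())
#     assert restored.d_model == config.d_model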
| 265
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints and convert them to the transformers layout."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
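
# Illustration of the key-renaming logic on single (hypothetical, but
# schema-shaped) checkpoint keys:
#
#     replace_key("prior.x_out.weight")    # -> "prior.fc_proj_out.weight"
#     replace_key("y_emb.lookup.weight")   # -> "metadata_embedding.lookup.weight"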
| 87
|
def heaps(arr: list) -> list:
    """Generate all permutations of ``arr`` in place with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
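# A minimal sketch exercising heaps() above: a three-element list yields all
# 3! = 6 permutations, each returned as a tuple.
if __name__ == "__main__":
    demo = heaps([1, 2, 3])
    assert len(demo) == 6
    assert set(demo) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}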
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """simple docstring"""
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents_reference: Optional[torch.FloatTensor] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        """simple docstring"""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(a , a ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(a )}.""" )
# get prompt text embeddings
lowercase__ = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
lowercase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
lowercase__ = text_embeddings.repeat(1 , a , 1 )
lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = 42
if negative_prompt is None:
lowercase__ = ['']
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase__ = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
lowercase__ = negative_prompt
lowercase__ = text_input_ids.shape[-1]
lowercase__ = self.tokenizer(
a , padding='max_length' , max_length=a , truncation=a , return_tensors='pt' , )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase__ = uncond_embeddings.shape[1]
lowercase__ = uncond_embeddings.repeat(a , a , 1 )
lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase__ = torch.randn(
a , generator=a , device='cpu' , dtype=a ).to(self.device )
lowercase__ = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
lowercase__ = torch.randn(
a , generator=a , device=self.device , dtype=a )
lowercase__ = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents_reference.to(self.device )
lowercase__ = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase__ = 0 if dx < 0 else dx
lowercase__ = 0 if dy < 0 else dy
lowercase__ = max(-dx , 0 )
lowercase__ = max(-dy , 0 )
# import pdb
# pdb.set_trace()
lowercase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase__ = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ = self.unet(a , a , encoder_hidden_states=a ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(a , a , a , **a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(a , a , a )
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(a ).sample
lowercase__ = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowercase__ = self.feature_extractor(self.numpy_to_pil(a ) , return_tensors='pt' ).to(
self.device )
lowercase__ , lowercase__ = self.safety_checker(
images=a , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase__ = None
if output_type == "pil":
lowercase__ = self.numpy_to_pil(a )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in ``base`` (2-36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
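# Hedged worked example: 255 = 15 * 16 + 15, and both remainders map to "F"
# through ALPHABET_VALUES, so 255 in base 16 reads back as "FF".
if __name__ == "__main__":
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(9, 2) == "1001"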
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
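# Hedged usage sketch of the helpers above on a toy newline-separated corpus:
if __name__ == "__main__":
    corpus = "this is the first document\nthis document is the second document"
    tf_ = term_frequency("document", "this document is the second document")  # 2
    df, n = document_frequency("document", corpus)  # (2, 2)
    idf = inverse_document_frequency(df, n)  # log10(2 / 2) = 0.0
    print(tf_idf(tf_, idf))  # 0.0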
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    """simple docstring"""
    def get_tokenizer(self, mname):
        """simple docstring"""
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        """simple docstring"""
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        """simple docstring"""
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]['src']
        tgt_sentences = bleu_data[pair]['tgt']

        batch = tokenizer(src_sentences, return_tensors='pt', truncation=True, padding='longest').to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores['bleu'], min_bleu_score)
__author__ = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    """simple docstring"""
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """simple docstring"""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
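# Note: 1664525 / 1013904223 / 2**32 are the classic "Numerical Recipes" LCG
# parameters. Hedged sketch of one step of the recurrence for seed 0 (shown as
# a comment, since the demo loop above never terminates):
#
#     gen = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
#     gen.next_number()  # == 1013904223, i.e. (1664525 * 0 + 1013904223) % 2**32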
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        '''simple docstring'''
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree: Node | None) -> int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    """simple docstring"""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
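# Hedged example: removing one child of an internal node makes the tree non-full.
if __name__ == "__main__":
    lopsided = Node(1)
    lopsided.left = Node(2)
    print(is_full_binary_tree(lopsided))  # False: the root has exactly one child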
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        '''simple docstring'''
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
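# Hedged usage sketch (the checkpoint id is an assumption; any LDM
# super-resolution checkpoint with vqvae/unet/scheduler components should work):
#
#     pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#     upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]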
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
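# Worked example: three resistors of 2, 4 and 4 ohm.
# Parallel: 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm; series: 2 + 4 + 4 = 10 ohm.
if __name__ == "__main__":
    assert resistor_parallel([2, 4, 4]) == 1.0
    assert resistor_series([2, 4, 4]) == 10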
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError('Unsupported Group')
        self.prime = primes[group]['prime']
        self.generator = primes[group]['generator']

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError('Invalid public key')
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # check if the other party's public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]['prime']

        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError('Invalid public key')

        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
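# Hedged usage sketch: two parties exchange public keys and derive the same
# shared secret (group 14, i.e. the 2048-bit MODP group, by default).
if __name__ == "__main__":
    alice = DiffieHellman()
    bob = DiffieHellman()
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared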
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    '''simple docstring'''

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self):
        """simple docstring"""
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """simple docstring"""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """simple docstring"""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """simple docstring"""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode='trunc'),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """simple docstring"""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """simple docstring"""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    '''simple docstring'''
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
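# Hedged sketch: a 64x64 pan over 20 azimuth angles; camera_rays stacks an
# (origin, direction) pair per pixel, so the expected shape is (1, 20 * 64 * 64, 2, 3).
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 81920, 2, 3])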
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""")
    if any(i <= 0 for i in nums):
        raise ValueError("""All values must be greater than 0""")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
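# Hedged examples: the longest side must be strictly shorter than the sum of the rest.
if __name__ == "__main__":
    assert check_polygon([3, 4, 5]) is True   # a valid triangle
    assert check_polygon([1, 1, 3]) is False  # 3 >= 1 + 1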
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    """simple docstring"""
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors='''tf''')
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='''hybrid''')

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = '''project'''

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='''dataset''')), '''r'''))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_UpperCamelCase =name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
_UpperCamelCase =name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
_UpperCamelCase =name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
_UpperCamelCase =name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
_UpperCamelCase =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
_UpperCamelCase =name.replace('''proj''' , '''projection''' )
if "blocks" in name:
_UpperCamelCase =name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
_UpperCamelCase =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_UpperCamelCase =name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
_UpperCamelCase =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
_UpperCamelCase =name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
_UpperCamelCase =name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
_UpperCamelCase =name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
_UpperCamelCase =name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
_UpperCamelCase =name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
_UpperCamelCase =name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
_UpperCamelCase =name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
_UpperCamelCase =int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_UpperCamelCase =name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
_UpperCamelCase =name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
_UpperCamelCase =name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
_UpperCamelCase =name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
_UpperCamelCase =name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
_UpperCamelCase =name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
_UpperCamelCase =name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
_UpperCamelCase =name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
_UpperCamelCase =name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
_UpperCamelCase =name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
_UpperCamelCase =name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
_UpperCamelCase =name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
_UpperCamelCase =name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
_UpperCamelCase =name.replace('''..''' , '''.''' )
if "stem.conv" in name:
_UpperCamelCase =name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
_UpperCamelCase =name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
_UpperCamelCase =name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
_UpperCamelCase =name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
_UpperCamelCase =name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
_UpperCamelCase =name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
_UpperCamelCase =name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
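
# Two illustrative traces through the rename chain above (checkpoint keys assumed for demonstration):
#   "scratch.layer1_rn.weight"           -> "neck.convs.0.weight"
#   "scratch.refinenet4.out_conv.weight" -> "neck.fusion_stage.layers.0.projection.weight"
#                                           (refinenet4 maps to fusion_stage.layers.0 per the 4 -> 0 rule)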
def read_in_q_k_v(state_dict, config):
    # The split q/k/v key names below follow the HF DPT module naming
    # ("...attention.attention.{query,key,value}").
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
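
# Minimal sketch of the qkv split performed above (hypothetical sizes, illustration only):
# a fused projection of shape (3 * hidden_size, hidden_size) is sliced into three
# (hidden_size, hidden_size) blocks, in query/key/value order.
#   hidden_size = 4
#   qkv = torch.randn(3 * hidden_size, hidden_size)
#   q, k, v = qkv[:hidden_size], qkv[hidden_size : 2 * hidden_size], qkv[-hidden_size:]
#   assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)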
def prepare_img():
    """Download a test image from the COCO validation set."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Copy/paste/tweak the original DPT checkpoint's weights into our DPT structure."""
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
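
# Example invocation (script name and output path are placeholders, shown for illustration;
# the checkpoint URL is the default from the argument parser below):
#   python convert_dpt_checkpoint.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-converted --show_prediction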
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
__lowerCamelCase : Dict = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that each `complete_*` script contains all of the information found in the
    corresponding `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Compare one complete example against every `by_feature` script, either on the
        argument-parsing section (`main()`) or the training section (`training_function()`)."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
def solution(limit: int = 1000) -> int:
    """Project Euler problem 1: return the sum of all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
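
# A constant-time alternative via inclusion-exclusion (an added sketch, not part of the
# original solution): sum the multiples of 3 and of 5, then subtract the multiples of 15,
# which were counted twice. For limit=1000 this also yields 233168.
def solution_closed_form(limit: int = 1000) -> int:
    def series_sum(k: int) -> int:
        n = (limit - 1) // k  # number of positive multiples of k strictly below `limit`
        return k * n * (n + 1) // 2  # k * (1 + 2 + ... + n)

    return series_sum(3) + series_sum(5) - series_sum(15)
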
if __name__ == "__main__":
print(f"""{solution() = }""")
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False

    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float64)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
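
# Added illustrative sketch (not part of the original script): the "topK" method above
# relies on emmental's TopKBinarizer; its core idea can be written in plain PyTorch as
# keeping only the weights whose learned importance scores fall in the top fraction.
def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    """Return a 0/1 mask that keeps (at least) the `keep_ratio` highest-scoring entries."""
    flat = scores.flatten()
    k = max(1, int(keep_ratio * flat.numel()))
    cutoff = flat.topk(k).values.min()  # the k-th largest score
    return (scores >= cutoff).to(scores.dtype)


# e.g. pruned = weight * topk_mask(mask_scores, keep_ratio=0.1)
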
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
    args = parser.parse_args()
main(args)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
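
# For instance (illustrative inputs): normalization lowercases and strips punctuation and
# articles, so "The cat." and "cat" count as an exact match, and
#   compute_em(predictions=["The cat."], references=[["cat"]])  ->  100.0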
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(rgramcounter) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3

    return finalscore
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) tokenizer for CodeGen, with optional truncation of decoded completions."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
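
# Illustrative usage of `decode(..., truncate_before_pattern=...)` (the checkpoint id is taken
# from the map above; the input snippet is hypothetical). The decoded completion is cut at the
# second top-level `print`/`def`, or at the earliest match of any supplied pattern:
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def hello():\n    print('hi')\n\n# trailing comment")["input_ids"]
#   text = tokenizer.decode(ids, truncate_before_pattern=[r"^#", "\n\n\n"])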
"""simple docstring"""
def lowercase__ ( lowerCAmelCase__ : str , lowerCAmelCase__ : int ) -> str:
'''simple docstring'''
a__ : list[list[str]] = [[] for _ in range(lowerCAmelCase__ )]
a__ : List[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(lowerCAmelCase__ ) <= key:
return input_string
for position, character in enumerate(lowerCAmelCase__ ):
a__ : Optional[Any] = position % (lowest * 2) # puts it in bounds
a__ : Dict = min(lowerCAmelCase__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCAmelCase__ )
a__ : List[Any] = ["".join(lowerCAmelCase__ ) for row in temp_grid]
a__ : int = "".join(lowerCAmelCase__ )
return output_string
def decrypt(input_string: str, key: int) -> str:
    """Generates a zigzag template based on the key, fills it with the characters
    of the input string, then reads the grid back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def lowercase__ ( lowerCAmelCase__ : str ) -> dict[int, str]:
'''simple docstring'''
a__ : List[Any] = {}
for key_guess in range(1 , len(lowerCAmelCase__ ) ): # tries every key
a__ : Optional[Any] = decrypt(lowerCAmelCase__ , lowerCAmelCase__ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
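# Roundtrip sanity check for the functions above, using the classic 3-rail example:
assert encrypt("WEAREDISCOVEREDFLEEATONCE", 3) == "WECRLTEERDSOEEFEAOCAIVDEN"
assert decrypt("WECRLTEERDSOEEFEAOCAIVDEN", 3) == "WEAREDISCOVEREDFLEEATONCE"
assert bruteforce("WECRLTEERDSOEEFEAOCAIVDEN")[3] == "WEAREDISCOVEREDFLEEATONCE"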
| 251
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
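# Worked example of the two-process reduce arithmetic above, checked with plain
# torch (no distributed setup needed): create_tensor yields [1., 2.] on rank 0
# and [3., 4.] on rank 1.
_rank0 = torch.arange(2) + 1.0 + 2 * 0
_rank1 = torch.arange(2) + 1.0 + 2 * 1
assert torch.equal(_rank0 + _rank1, torch.tensor([4.0, 6.0]))        # "sum" reduction
assert torch.equal((_rank0 + _rank1) / 2, torch.tensor([2.0, 3.0]))  # "mean" reduction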
| 251
| 1
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
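# Sanity check against the Project Euler 117 statement: with unit squares plus
# tiles of length 2, 3 and 4, a row of five units can be tiled in exactly 15 ways,
# matching f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1: 1, 1, 2, 4, 8, 15.
assert solution(5) == 15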
| 654
|
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
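# `get_duration` is imported from a local `utils` module that is not shown here.
# A minimal sketch of what such a timing decorator could look like (an assumption,
# not the actual implementation):
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed seconds, stored in `times`

    return wrapper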
| 654
| 1
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase : str = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 719
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 94
| 0
|
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Returns the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
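# Example of the restored function: the first term is always "1", later terms are
# "1 / n^p" with the power evaluated.
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]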
| 479
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, but we want it to be 0 for CI.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
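# Illustrative use of the IGNORE_RESULT flag registered above (assumed example,
# not part of this conftest): a doctest line can opt out of output comparison.
def _seed_and_report():
    """
    >>> _seed_and_report()  # doctest: +IGNORE_RESULT
    {'seed': 42}
    """
    return {"seed": 42}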
| 698
| 0
|
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
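# Rough numeric illustration for silicon at T = 300 K (textbook values, assumed
# here for the example): N_d = N_a = 1e17 cm^-3 and n_i = 1.5e10 cm^-3 give
# V_bi = (kT/q) * ln(N_d * N_a / n_i^2) ≈ 0.0259 V * 31.4 ≈ 0.81 V.
assert round(builtin_voltage(1e17, 1e17, 1.5e10), 2) == 0.81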
| 338
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
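# Quick check of the defaults wired up above:
config = GitConfig()
assert config.vision_config.hidden_size == 768
assert config.to_dict()["model_type"] == "git"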
| 338
| 1
|
'''simple docstring'''
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
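# Usage example for the class above:
ps = PrefixSum([1, 2, 3])
assert ps.prefix_sum == [1, 3, 6]
assert ps.get_sum(1, 2) == 5         # 2 + 3
assert ps.contains_sum(5) is True    # subarray [2, 3]
assert ps.contains_sum(7) is False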
| 209
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 209
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
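# Shape check for the sequence helpers above, using stand-in special-token ids
# (101/102 are assumed placeholders for this illustration, not RemBert's real ids):
class _Stub:
    sep_token_id = 102
    cls_token_id = 101


_ids = [7, 8, 9]
assert RemBertTokenizerFast.build_inputs_with_special_tokens(_Stub(), _ids) == [101, 7, 8, 9, 102]
assert RemBertTokenizerFast.create_token_type_ids_from_sequences(_Stub(), _ids, [5]) == [0] * 5 + [1] * 2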
| 703
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError if the page layout changed."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'''{instagram_user.number_of_posts = }''')
print(F'''{instagram_user.number_of_followers = }''')
print(F'''{instagram_user.number_of_followings = }''')
print(F'''{instagram_user.email = }''')
print(F'''{instagram_user.website = }''')
print(F'''{instagram_user.profile_picture_url = }''')
print(F'''{instagram_user.is_verified = }''')
print(F'''{instagram_user.is_private = }''')
| 560
| 0
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
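# The soft-voting step above, isolated on dummy logits (2 folds, 3 examples,
# 2 classes) so the stack/sum/div/argmax arithmetic is easy to follow:
_fold_logits = [
    torch.tensor([[2.0, 0.0], [0.0, 2.0], [1.0, 0.0]]),  # fold 1
    torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.0, 3.0]]),  # fold 2
]
_preds = torch.stack(_fold_logits, dim=0).sum(dim=0).div(2).argmax(dim=-1)
assert _preds.tolist() == [0, 1, 1]  # the third example flips to class 1 after averaging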
| 157
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator=None,
        latents=None,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
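# The latent-size helper above rounds each dimension up to a multiple of
# scale_factor**2, then divides by scale_factor:
assert downscale_height_and_width(768, 768, 8) == (96, 96)    # 768 = 12 * 64 -> 12 * 8
assert downscale_height_and_width(770, 770, 8) == (104, 104)  # 770 rounds up to 13 * 64 -> 13 * 8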
| 157
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
def sum_of_digits(n: int) -> int:
    """Find the sum of the digits of an integer (the sign is ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of sum_of_digits."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """One-liner variant of sum_of_digits."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations on increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
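
# Quick sanity check (illustrative): all three implementations agree, e.g.
#   sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15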
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
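
# Example (not from the source): a minimal concrete reader to illustrate the
# contract. `ExampleDatasetReader` is hypothetical; real readers build a
# DatasetBuilder from `self.path_or_paths` and return the requested split.
class ExampleDatasetReader(AbstractDatasetReader):
    def read(self):
        raise NotImplementedError("illustrative sketch only")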
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        # Validation
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
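
# For reference, sherman_morrison() above evaluates the Sherman-Morrison
# identity with `self` playing the role of A^(-1):
#
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
#
# The update only exists when the denominator 1 + v^T A^(-1) u is nonzero,
# which is exactly what the `numerator_factor == 0` guard checks.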
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
import argparse
import logging
import os
import re

import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    PushToHubCallback,
    TFAutoModelForMaskedLM,
    create_optimizer,
)


logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument("--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument("--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument("--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument("--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument("--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local")
    parser.add_argument("--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument("--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument("--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument("--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument("--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://` then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument("--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument("--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument("--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument("--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)


if __name__ == "__main__":
    args = parse_args()
    main(args)
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
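
# Example (not from the source): a minimal sketch of a concrete subcommand.
# `HelloCommand` is hypothetical; real commands follow the same pattern of
# registering a sub-parser and wiring a factory via set_defaults().
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")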
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
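
# Quick sanity check (illustrative values):
#   is_prime(13)   -> True
#   next_prime(14) -> 17   (scans upward from 14)
#   next_prime(13) -> 17   (a prime input is advanced to the next prime)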
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines.

    Args:
        images: Denoised PIL images or a numpy array of shape (batch, height, width, channels).
        nsfw_content_detected: Per-image flags denoting likely "not-safe-for-work" content,
            or `None` if the safety check was skipped.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
import json
import os
import re
import unittest

from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncation_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncation_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
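
# Illustrative check of the two helpers above (values are arbitrary): every
# pixel is mapped to the index of its nearest palette entry.
#
#   palette = np.array([[0, 0, 0], [255, 255, 255]])
#   pixels = np.array([[10, 10, 10], [250, 240, 255]])
#   color_quantize(pixels, palette)   # -> array([0, 1])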
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        # Rescale from [0, 255] to [0, 2], then shift to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
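
# Example usage (not from the source; the palette here is random, whereas real
# ImageGPT checkpoints ship learned cluster centers in the normalized [-1, 1]
# color space):
#
#   clusters = np.random.uniform(-1, 1, size=(8, 3))
#   image_processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 32, "width": 32})
#   image = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
#   encoding = image_processor(image, return_tensors="np")
#   encoding["input_ids"].shape   # (1, 1024): one cluster index per pixel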
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # Two implicants that differ in exactly one bit are combined:
                # both are marked as covered and the merged term is kept.
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
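
# Non-interactive example (illustrative; uses integer minterms so the binary
# strings come out clean, and assumes the merge logic in check() above):
#
#   binary = decimal_to_binary(3, [0, 1, 2, 5])   # ['000', '001', '010', '101']
#   prime_implicants = check(binary)              # {'00_', '0_0', '_01'}
#   chart = prime_implicant_chart(prime_implicants, binary)
#   selection(chart, prime_implicants)            # ['0_0', '_01'] (order may vary)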
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turn around time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process and fast-forward the clock to its
        # arrival if the CPU would otherwise be idle.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
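
# The scheduling rule above always dispatches the ready process with the
# highest response ratio
#
#   R = (W + S) / S
#
# where W = current_time - arrival_time is the time already spent waiting and
# S = burst_time is the service time; this is exactly the `temp` expression
# computed inside the selection loop.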
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : List[str] = {'vocab_file': 'sentencepiece.bpe.model'}
a_ : str = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
a_ : str = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
a_ : Tuple = '▁'
class _snake_case ( A__ ):
_lowercase : Dict = VOCAB_FILES_NAMES
_lowercase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a = None , **a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(a))
SCREAMING_SNAKE_CASE = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
SCREAMING_SNAKE_CASE = len(self.sp_model) - 1
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
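# Sketch: the special-token layouts produced by build_inputs_with_special_tokens
# above are <s> A </s> for one sequence and <s> A </s></s> B </s> for a pair.
# With the fairseq ids from __init__ (<s>=0, </s>=2) and hypothetical piece
# ids, the layout can be checked without a SentencePiece model file:
def _layout(token_ids_0, token_ids_1=None, cls_id=0, sep_id=2):
    if token_ids_1 is None:
        return [cls_id] + token_ids_0 + [sep_id]
    return [cls_id] + token_ids_0 + [sep_id, sep_id] + token_ids_1 + [sep_id]

assert _layout([5, 6]) == [0, 5, 6, 2]
assert _layout([5], [7]) == [0, 5, 2, 2, 7, 2]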
| 73
|
import sys
import turtle
def get_mid(p1, p2):
    """Midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
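# Note (added): each call draws one outline and recurses three times, so a
# depth-d run draws (3 ** (d + 1) - 1) // 2 triangles in total; depths much
# above 6 become slow with turtle graphics.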
| 73
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 704
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
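# Project Euler 116's worked example: a row of length 5 admits 7 red
# (length-2), 3 green (length-3) and 2 blue (length-4) tilings, 12 in total.
assert solution(5) == 12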
if __name__ == "__main__":
print(F'{solution() = }')
| 273
| 0
|
'''simple docstring'''
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
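# Quick check (added): C(10, 5) = 252. The row `c` above is one row of
# Pascal's triangle updated in place, so the function needs only O(r) memory
# instead of a full n x r table.
assert binomial_coefficient(n=10, r=5) == 252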
| 26
|
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
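# Example: 0b1011 has three set bits; each `number &= number - 1` clears the
# lowest set bit, so the loop body runs exactly three times.
# (get_set_bits_count is the name restored above.)
assert get_set_bits_count(0b1011) == 3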
| 137
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None,
                 do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 545
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        replace_prob=0.5, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
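# Sketch: the property above is the feature extractor's total downsampling
# factor, i.e. the product of the conv strides. With the defaults
# (5, 2, 2, 2, 2, 2, 2), each output frame covers 5 * 2**6 = 320 input samples:
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320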
| 545
| 1
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])


def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 484
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
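# Example (assumed textbook values): water at ~20 °C has a density of about
# 998 kg/m^3 and a bulk modulus of about 2.15e9 Pa, giving
# speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9) ≈ 1467.8 m/s.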
| 613
| 0
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
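# Usage sketch (assumes the optional `timm` dependency is installed; the
# backbone name "resnet18" is only an illustration):
#
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   model = TimmBackbone(config)    # class defined above
#   outputs = model(pixel_values)   # BackboneOutput, one feature map per out index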
| 185
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        quant_mode=False, force_dequant="none", **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
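# Minimal instantiation sketch (defaults come from __init__ above; quant_mode
# toggles I-BERT's integer-only quantisation path):
#
#   config = IBertConfig(quant_mode=True)
#   config.quant_mode  # True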
| 185
| 1
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 624
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 624
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
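# Note (added): with the _LazyModule indirection above, heavy submodules are
# imported only on first attribute access, e.g.
#   from transformers.models.longformer import LongformerConfig
# triggers the actual import of configuration_longformer at that point.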
| 713
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True,
                          do_train=True, do_eval=True, do_predict=True):
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed,
            extra_args_str=extra_args_str, predict_with_generate=predict_with_generate,
            do_train=do_train, do_eval=do_eval, do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__UpperCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(
distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__UpperCamelCase )
    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4,
            num_train_epochs=10, distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1,
                optim=optim, distributed=True, extra_args_str=extra_args,
                do_eval=False, do_predict=False, n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
snake_case__ = """
--do_predict
""".split()
snake_case__ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
return output_dir
"""simple docstring"""
from string import ascii_uppercase
_A = {char: i for i, char in enumerate(ascii_uppercase)}
_A = dict(enumerate(ascii_uppercase))
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : int = len(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = 0
while True:
if x == i:
UpperCAmelCase__ : Union[str, Any] = 0
if len(lowerCAmelCase ) == len(lowerCAmelCase ):
break
key += key[i]
i += 1
return key
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : int = """"""
UpperCAmelCase__ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
UpperCAmelCase__ : List[str] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> str:
UpperCAmelCase__ : Dict = """"""
UpperCAmelCase__ : Optional[int] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
UpperCAmelCase__ : Any = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def a__ ( ) -> None:
UpperCAmelCase__ : Optional[int] = """THE GERMAN ATTACK"""
UpperCAmelCase__ : Any = """SECRET"""
UpperCAmelCase__ : Optional[Any] = generate_key(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : int = cipher_text(lowerCAmelCase , lowerCAmelCase )
print(F"""Encrypted Text = {s}""" )
print(F"""Original Text = {original_text(lowerCAmelCase , lowerCAmelCase )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
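# Quick sanity check (values from main() above):
#   generate_key("THE GERMAN ATTACK", "SECRET") == "SECRETSECRETSECRE"
# and original_text(cipher_text(m, k), k) recovers m, since
# (p - k) + k == p (mod 26).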
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
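# _LazyModule defers the actual import of `BartphoTokenizer` until the first
# attribute access, so importing the package stays cheap even when optional
# dependencies such as sentencepiece are installed.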
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__a = """\
"""
__a = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
__a = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
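            # The logits at position t predict token t+1, so drop the last logit
            # and the first label to align predictions with targets (the standard
            # causal-LM shift); the attention mask is shifted the same way so
            # padding never contributes to the loss.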
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
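    # Backpropagation below applies the chain rule layer by layer: each weight
    # update is (activations into that layer).T @ (error term), where the error
    # term starts as 2 * (target - prediction) * sigmoid'(prediction) and is
    # pushed back through each weight matrix in turn.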
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
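# Note: `sigmoid_derivative` expects the *already activated* value, using the
# identity sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)).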
def example() -> int:
    # Input values (a three-bit truth table).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
UpperCamelCase__ = '.'
if __name__ == "__main__":
UpperCamelCase__ = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
UpperCamelCase__ = []
UpperCamelCase__ = []
with open(doctest_file_path) as fp:
for line in fp:
UpperCamelCase__ = line.strip()
UpperCamelCase__ = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
UpperCamelCase__ = '\n'.join(non_existent_paths)
raise ValueError(f'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}')
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
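# Example `documentation_tests.txt` layout this script expects (hypothetical
# paths), one repo-relative path per line, alphabetically sorted:
#   docs/source/en/model_doc/bert.md
#   src/transformers/models/bert/modeling_bert.py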
"""simple docstring"""
from typing import Any
class a :
def __init__( self , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = data
UpperCAmelCase__ : List[str] = None
def __repr__( self ):
return F'''Node({self.data})'''
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
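        # e.g. 1->2->3 becomes 3->2->1: `prev` ends up on the old tail, which
        # becomes the new head.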
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding


class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
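# Bottleneck variant: a 1x1 conv first reduces channels by `reduction`, the 3x3
# conv (optionally strided) does the spatial work on the smaller tensor, and a
# final 1x1 conv expands back out, which is cheaper than two full 3x3 convs.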
class ResNetBottleNeckLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()

        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: bool = None, return_dict: bool = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )