| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 82 to 53.2k) | int64 (0 to 721) | string (length 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; PIL coerces the
        # lookup-table results back into the valid 0-255 pixel range.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| code_codestyle: 315 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
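# Note (added, not part of the original test): BioGPT builds model inputs by
# *prepending* its separator token (id 2) to each sequence, rather than using a
# [CLS]/[SEP] pair -- which is exactly what the two assertions above check.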
| style_context_codestyle: 411 | label: 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
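# Lifecycle implemented below (summary added for clarity): after 23+ days of
# inactivity the bot comments and labels the issue "stale"; if 7 more days pass
# with no reply, the issue is closed; a comment from anyone other than the bot
# reopens it and removes the label.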
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| code_codestyle: 720 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
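# A minimal sketch (added for orientation, not part of the original script) of the
# gradient-accumulation pattern this example builds up to:
#
#     accelerator = Accelerator(gradient_accumulation_steps=4)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = model(**batch).loss
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()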
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| style_context_codestyle: 606 | label: 0 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
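# Example of the git_log.json written above (illustrative values only):
# {
#     "repo_id": "<git.Repo '/path/to/repo/.git'>",
#     "repo_sha": "0123abcdef...",
#     "repo_branch": "main"
# }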
def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])
# number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def set_seed(args):
    """Set the random seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| code_codestyle: 205 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
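# Usage sketch (added for illustration, not part of the original module):
# config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
# assert config.model_type == "mobilenet_v2"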
| style_context_codestyle: 205 | label: 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| code_codestyle: 278 |
import numpy as np
SQUARE = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at position [index1, index2] of the polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of the message according to the bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # First step: write the row/column coordinates of each letter in two rows.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Second step: read the coordinates row by row and regroup them in pairs.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of the message according to the bifid cipher."""
        message = message.lower()
        message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message)] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
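# Usage sketch (added): the bifid encode/decode round-trip, assuming a message of
# lowercase letters with no spaces and "j" written as "i":
# cipher = BifidCipher()
# secret = cipher.encode("testmessage")
# assert cipher.decode(secret) == "testmessage"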
| style_context_codestyle: 278 | label: 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=["ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| code_codestyle: 614 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| style_context_codestyle: 473 | label: 0 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c,
    calculates the roots for any quadratic equation of the form ax^2 + bx + c.
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
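# Worked check (added): for a=5, b=6, c=1 the discriminant is 36 - 20 = 16, so the
# script prints: The solutions are: -0.2 and -1.0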
| code_codestyle: 467 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| style_context_codestyle: 467 | label: 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
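# Example mapping (added for illustration):
#   "transformer.layers.0.norm1.weight" -> "model.decoder.layers.0.self_attn_layer_norm.weight"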
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| code_codestyle: 2 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class A_ ( lowerCAmelCase_ ):
'''simple docstring'''
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
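# Added usage sketch (illustrative; this module lives inside `transformers`
# and uses relative imports, so exercise the class through the library):
#
#   from transformers import SqueezeBertTokenizerFast
#   tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#   enc = tok("hello world", "second segment")
#   # token_type_ids are 0 over "[CLS] seq_a [SEP]" and 1 over "seq_b [SEP]",
#   # mirroring create_token_type_ids_from_sequences above.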
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) ->Dict:
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ) ->Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) ->str:
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ) ->List[Any]:
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ) ->List[str]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processor , '''image_std''' ) )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processor , '''size''' ) )
    def test_batch_feature( self ) ->Tuple:
pass
    def test_call_pil( self ) ->Union[str, Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
    def test_call_numpy( self ) ->Tuple:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
    def test_call_pytorch( self ) ->Optional[Any]:
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ) , )
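# Added illustration of the behaviour asserted above (run inside the test
# suite; this file uses relative imports and assumes torch + vision extras):
#
#   processor = ViTImageProcessor(size={"height": 18, "width": 18})
#   sample = np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8)
#   processor(sample, return_tensors="pt").pixel_values.shape  # -> [1, 3, 18, 18]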
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=[30, 30] , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , n_targets=8 , num_detection_tokens=10 , ) ->Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
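        # Worked example with the defaults above (image_size=[30, 30], patch_size=2):
        # num_patches = (30 // 2) * (30 // 2) = 225, so
        # expected_seq_len = 225 + 1 ([CLS]) + 10 (detection tokens) = 236.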
    def prepare_config_and_inputs( self ) ->List[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size ):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=torch_device )
                target["boxes"] = torch.rand(self.n_targets , 4 , device=torch_device )
                labels.append(target )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) ->int:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ) ->List[str]:
        model = YolosModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def create_and_check_for_object_detection( self , config , pixel_values , labels ) ->Optional[Any]:
        model = YolosForObjectDetection(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values=pixel_values )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        result = model(pixel_values=pixel_values , labels=labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    def prepare_config_and_inputs_for_common( self ) ->Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) ->Union[str, Any]:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size ):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,) , device=torch_device , dtype=torch.long )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets , 4 , device=torch_device , dtype=torch.float )
                    labels.append(target )
                inputs_dict["labels"] = labels
        return inputs_dict
    def setUp( self ) ->str:
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) ->Tuple:
self.config_tester.run_common_tests()
    def test_inputs_embeds( self ) ->Optional[int]:
# YOLOS does not use inputs_embeds
pass
    def test_model_common_attributes( self ) ->Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) ->Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) ->Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_attention_outputs( self ) ->List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
    def test_hidden_states_output( self ) ->Optional[int]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_object_detection( self ) ->Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) ->List[Any]:
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ) ->Dict:
return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None
@slow
    def test_inference_object_detection_head( self ) ->Optional[Any]:
        model = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values )
        # verify outputs
        expected_shape = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice_logits = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=torch_device , )
        expected_slice_boxes = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice_logits , atol=1e-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 ) )
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        expected_scores = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(torch_device )
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(torch_device )
        self.assertEqual(len(results['''scores'''] ) , 5 )
        self.assertTrue(torch.allclose(results['''scores'''] , expected_scores , atol=1e-4 ) )
        self.assertSequenceEqual(results['''labels'''].tolist() , expected_labels )
        self.assertTrue(torch.allclose(results['''boxes'''][0, :] , expected_slice_boxes ) )
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''tapas'''
    def __init__(self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ) -> Dict:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
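# Added usage sketch (illustrative; this module uses relative imports, so
# import through the library instead of running this file):
#
#   from transformers import TapasConfig
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
#   # mirrors a weak-supervision (WTQ-style) fine-tuning setup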
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification( TaskTemplate ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features( self , features ) -> Any:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
'''simple docstring'''
return {
self.text_column: "text",
self.label_column: "labels",
}
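# Added usage sketch (illustrative; this module uses relative imports, so
# exercise it from within the `datasets` package):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   aligned = template.align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["neg", "pos"]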
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at , parent , bridges , id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_)
                low[at] = min(low[at] , low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to])
    bridges = []
    for i in range(n):
        if not visited[i]:
            dfs(i , -1 , bridges , id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
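    # Added example: bridges of the first demo graph; removing any printed
    # edge disconnects the graph.
    print(compute_bridges(get_demo_graph(0)))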
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n = N):
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
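    # Added cross-check: the same answer via an explicit sliding-window product.
    from functools import reduce
    from operator import mul
    assert solution() == max(
        reduce(mul, map(int, N[i : i + 13]), 1) for i in range(len(N) - 12)
    )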
import requests
from bs4 import BeautifulSoup
def stock_price( symbol: str = "AAPL" ) -> str:
    '''simple docstring'''
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , """html.parser""" )
    class_ = """My(6px) Pos(r) smartphone_Mt(6px)"""
    return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"
@pytest.fixture(scope="session" )
def zstd_path(tmp_path_factory ):
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file(tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract(compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path(default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local(text_file ):
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local(tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec(tmpfs_file ):
    output_path = get_from_cache(f'tmp://{tmpfs_file}' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_http_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        http_get("https://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_ftp_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get("ftp://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , True )
def test_fsspec_offline(tmp_path_factory ):
    filename = tmp_path_factory.mktemp("data" ) / "file.html"
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get("s3://huggingface.co" , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head("s3://huggingface.co" )
import numpy as np
SQUARE = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class BifidCipher:
    def __init__( self ) -> None:
        self.SQUARE = np.array(SQUARE )
    def letter_to_numbers( self , letter: str ) -> np.ndarray:
        # return the 1-based (row, column) indexes of `letter` in the square
        index1 , index2 = np.where(letter == self.SQUARE )
        indexes = np.concatenate([index1 + 1, index2 + 1] )
        return indexes
    def numbers_to_letter( self , index1: int , index2: int ) -> str:
        # return the letter at the 1-based (row, column) position in the square
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode( self , message: str ) -> str:
        message = message.lower()
        message = message.replace(" " , "" )
        message = message.replace("j" , "i" )
        first_step = np.empty((2, len(message )) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message ) )
        encoded_message = ""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[numbers_index * 2] )
            index2 = int(second_step[(numbers_index * 2) + 1] )
            letter = self.numbers_to_letter(index1 , index2 )
            encoded_message = encoded_message + letter
        return encoded_message
    def decode( self , message: str ) -> str:
        message = message.lower()
        message.replace(" " , "" )
        first_step = np.empty(2 * len(message ) )
        for letter_index in range(len(message ) ):
            numbers = self.letter_to_numbers(message[letter_index] )
            first_step[letter_index] = numbers[0]
            first_step[letter_index + len(message )] = numbers[1]
        second_step = first_step.reshape((2, len(message )) )
        decoded_message = ""
        for numbers_index in range(len(message ) ):
            index1 = int(second_step[0, numbers_index] )
            index2 = int(second_step[1, numbers_index] )
            letter = self.numbers_to_letter(index1 , index2 )
            decoded_message = decoded_message + letter
        return decoded_message
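if __name__ == "__main__":
    # Added round-trip check: encode() folds "j" into "i" and strips spaces;
    # otherwise decode(encode(m)) == m for lowercase alphabetic messages.
    cipher = BifidCipher()
    secret = cipher.encode("testmessage")
    assert cipher.decode(secret) == "testmessage"
    print(secret)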
def gray_code(bit_count):
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count):
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
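    # Added example: the 3-bit reflected Gray code.
    print(gray_code(3))  # [0, 1, 3, 2, 6, 7, 5, 4]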
def _print_dist(dist , v ):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) ,end='''\t''' )
            else:
                print('''INF''' ,end='''\t''' )
        print()
def floyd_warshall(graph , v ):
    dist = [[float('''inf''' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input('''Enter number of vertices: '''))
    e = int(input('''Enter number of edges: '''))
    graph = [[float('''inf''') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        src = int(input('''Enter source:'''))
        dst = int(input('''Enter destination:'''))
        weight = float(input('''Enter weight:'''))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
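# Added non-interactive example (bypasses the input() prompts above):
#
#   INF = float("inf")
#   dist, _ = floyd_warshall([[0, 2, INF], [INF, 0, 1], [4, INF, 0]], 3)
#   dist[0][2] == 3   # shortest path 0 -> 1 -> 2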
def get_highest_set_bit_position(number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError("""Input value must be an 'int' type""" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
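    # Added sample values: the helper returns the 1-indexed position of the
    # highest set bit (0 has no set bits).
    assert get_highest_set_bit_position(1) == 1
    assert get_highest_set_bit_position(8) == 4
    assert get_highest_set_bit_position(0) == 0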
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
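# Added note (illustration of the lazy-import pattern above, assumptions: a
# transformers checkout with this package on the path):
#
#   import importlib
#   mod = importlib.import_module("transformers.models.efficientformer")
#   mod.EfficientFormerConfig  # attribute access triggers the real submodule import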
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = '''src/transformers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'''\[([^\]]+)\]''')
def get_indent(line ):
    '''simple docstring'''
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    '''simple docstring'''
    index = 0
    lines = code.split("\n" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["\n".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
                current_block.append(lines[index] )
                blocks.append("\n".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("\n".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("\n".join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    '''simple docstring'''
    def _inner(x ):
        return key(x ).lower().replace("_" , "" )
    return _inner
def sort_objects(objects , key=None ):
    '''simple docstring'''
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement ):
    '''simple docstring'''
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return f"""[{imports}]"""
        keys = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split("\n" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x: x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    '''simple docstring'''
    with open(file , encoding="utf-8" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x: x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reorderded_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(f"""Overwriting {file}.""" )
            with open(file , "w" , encoding="utf-8" ) as f:
                f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , "__init__.py" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , "__init__.py" )]
    if len(failures ) > 0:
        raise ValueError(f"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def load_vocab_and_emoji(vocab_file , emoji_file ) -> Tuple:
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    '''simple docstring'''

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False

        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0XE28080 and c <= 0XE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase="\n" ):
__A : List[str] = []
__A : Union[str, Any] = []
__A : Union[str, Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase_ ) > 0:
words.append(bytearray(lowerCAmelCase_ ).decode("utf-8" , errors="replace" ) )
__A : Union[str, Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase_ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
words.append(bytearray(lowerCAmelCase_ ).decode("utf-8" , errors="replace" ) )
__A : Optional[int] = "".join(lowerCAmelCase_ )
return text
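# ---------------------------------------------------------------------------
# Minimal usage sketch for the pair of classes above (hypothetical file names;
# the real vocab/emoji files ship with the "abeja/gpt-neox-japanese-2.7b"
# checkpoint referenced at the top of this file):
#
#   vocab, raw_vocab, ids_to_tokens, emoji = load_vocab_and_emoji("vocab.txt", "emoji.json")
#   swt = SubWordJapaneseTokenizer(vocab=vocab, ids_to_tokens=ids_to_tokens, emoji=emoji)
#   tokens = swt.tokenize("こんにちは 世界", clean=True)
#
# Characters missing from the vocabulary fall back to "<|byte..|>" tokens,
# two-byte symbols become "<KIGOU>", and U+2000-U+2BFF characters become
# "<U2000U2BFF>", so tokenization never fails on unseen input.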
| 520
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _UpperCAmelCase(BaseImageProcessor):  # original class name lost in the dump; the 256/224 recipe matches LeViT-style processors
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, Iterable[float]]] = None, image_std: Optional[Union[float, Iterable[float]]] = None, return_tensors: Optional[TensorType] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size_dict, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
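# Worked example of the "shortest_edge" rule in resize() above (plain
# arithmetic, no image needed): with the default size {"shortest_edge": 224}
# the shorter side is first scaled to int((256 / 224) * 224) = 256, and the
# 224x224 center crop follows -- the standard "resize to 256, crop to 224"
# ImageNet evaluation recipe.
assert int((256 / 224) * 224) == 256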
| 53
| 0
|
'''simple docstring'''
import math
def jump_search(arr: list, x: int) -> int:
    """Returns the index of x in the sorted list arr via jump search, or -1."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    x = int(input('Enter the number to be searched:\n'))
    res = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"""Number {x} is at index {res}""")
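# Worked example (hypothetical input): jump_search([1, 3, 5, 7, 9], 7) -> 3.
# With n = 5 the block size is int(sqrt(5)) = 2, so the probes land on
# arr[1] = 3 and arr[3] = 7; the jump stops there and the linear scan from
# prev = 2 reaches index 3. Overall O(sqrt(n)) comparisons on a sorted array.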
| 711
|
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Finds the longest common substring of two strings with dynamic programming.

    >>> longest_common_substring("abcdef", "xabded")
    'ab'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
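# The recurrence behind the table above: dp[i][j] holds the length of the
# longest common suffix of text1[:i] and text2[:j]; it extends the diagonal
# value by one on a character match and stays 0 otherwise. For "abcdef" vs
# "xabded" the best cell is dp[2][3] = 2, recovering "ab" as in the doctest.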
| 435
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
    def test_flatten_dict(self):
        input_dict = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_2_8, '''min_length''': 1_2, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_4_2, '''min_length''': 5_6, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 6_2, '''min_length''': 1_1, '''num_beams''': 6},
}
}
        expected_dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_2_8,
'''task_specific_params.summarization.min_length''': 1_2,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_4_2,
'''task_specific_params.summarization_cnn.min_length''': 5_6,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 6_2,
'''task_specific_params.summarization_xsum.min_length''': 1_1,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
        self.assertEqual(flatten_dict(input_dict), expected_dict)
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = np.random.randn(3 , 4)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , x.transpose()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , x.transpose((1, 2, 0))))
@require_torch
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , transpose(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , transpose(lowerCamelCase_ , axes=(1, 2, 0)).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> Any:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , transpose(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , transpose(lowerCamelCase_ , axes=(1, 2, 0)).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> Dict:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_) , np.asarray(transpose(lowerCamelCase_))))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(transpose(lowerCamelCase_ , axes=(1, 2, 0)) , np.asarray(transpose(lowerCamelCase_ , axes=(1, 2, 0)))))
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , np.reshape(lowerCamelCase_ , (4, 3))))
UpperCamelCase = np.random.randn(3 , 4 , 5)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , np.reshape(lowerCamelCase_ , (1_2, 5))))
@require_torch
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , reshape(lowerCamelCase_ , (4, 3)).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , reshape(lowerCamelCase_ , (1_2, 5)).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , reshape(lowerCamelCase_ , (4, 3)).numpy()))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , reshape(lowerCamelCase_ , (1_2, 5)).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (4, 3)) , np.asarray(reshape(lowerCamelCase_ , (4, 3)))))
UpperCamelCase = np.random.randn(3 , 4 , 5)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(reshape(lowerCamelCase_ , (1_2, 5)) , np.asarray(reshape(lowerCamelCase_ , (1_2, 5)))))
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = np.random.randn(1 , 3 , 4)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , np.squeeze(lowerCamelCase_)))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , np.squeeze(lowerCamelCase_ , axis=2)))
@require_torch
def UpperCAmelCase__ ( self) -> int:
UpperCamelCase = np.random.randn(1 , 3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , squeeze(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , squeeze(lowerCamelCase_ , axis=2).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(1 , 3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , squeeze(lowerCamelCase_).numpy()))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , squeeze(lowerCamelCase_ , axis=2).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> str:
UpperCamelCase = np.random.randn(1 , 3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_) , np.asarray(squeeze(lowerCamelCase_))))
UpperCamelCase = np.random.randn(1 , 4 , 1 , 5)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(squeeze(lowerCamelCase_ , axis=2) , np.asarray(squeeze(lowerCamelCase_ , axis=2))))
def UpperCAmelCase__ ( self) -> Optional[Any]:
UpperCamelCase = np.random.randn(3 , 4)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , np.expand_dims(lowerCamelCase_ , axis=1)))
@require_torch
def UpperCAmelCase__ ( self) -> List[str]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = torch.tensor(lowerCamelCase_)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , expand_dims(lowerCamelCase_ , axis=1).numpy()))
@require_tf
def UpperCAmelCase__ ( self) -> List[Any]:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = tf.constant(lowerCamelCase_)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , expand_dims(lowerCamelCase_ , axis=1).numpy()))
@require_flax
def UpperCAmelCase__ ( self) -> Tuple:
UpperCamelCase = np.random.randn(3 , 4)
UpperCamelCase = jnp.array(lowerCamelCase_)
self.assertTrue(np.allclose(expand_dims(lowerCamelCase_ , axis=1) , np.asarray(expand_dims(lowerCamelCase_ , axis=1))))
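# A minimal sketch of the dict-flattening behavior exercised by
# test_flatten_dict above; the real transformers.utils.flatten_dict may differ
# in details (e.g. a configurable separator), so this is illustrative only.
def _flatten_dict_sketch(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = parent_key + delimiter + str(k) if parent_key else str(k)
        if isinstance(v, dict):
            items.update(_flatten_dict_sketch(v, key, delimiter))
        else:
            items[key] = v
    return items


# _flatten_dict_sketch({"task_specific_params": {"summarization": {"num_beams": 4}}})
# -> {"task_specific_params.summarization.num_beams": 4}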
| 34
|
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    """Recursively fills data[] and prints every combination of size r."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Prints all combinations of size r in arr[] of size n."""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
lowercase__ :int = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
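# Expected output (first lines) for the driver above -- all C(5, 3) = 10
# combinations of [10, 20, 30, 40, 50] taken 3 at a time, in lexicographic
# order of indices:
#   10 20 30
#   10 20 40
#   10 20 50
#   10 30 40
#   ...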
| 522
| 0
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = GPTSanJapaneseTokenizer
lowercase_ = False
lowercase_ = {"""do_clean_text""": False, """add_prefix_space""": False}
def __UpperCamelCase ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
__A =['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
# fmt: on
__A ={'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
__A ={'''unk_token''': '''<unk>'''}
__A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__A =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.emoji_file , '''w''' ) as emoji_writer:
emoji_writer.write(json.dumps(lowercase__ ) )
def __UpperCamelCase ( self , **lowercase__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
__A ='''こんにちは、世界。 \nこんばんは、㔺界。😀'''
__A ='''こんにちは、世界。 \nこんばんは、世界。😀'''
return input_text, output_text
def __UpperCamelCase ( self , lowercase__ ):
'''simple docstring'''
__A , __A =self.get_input_output_texts(lowercase__ )
__A =tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__A =tokenizer.decode(lowercase__ , clean_up_tokenization_spaces=lowercase__ )
return text, ids
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
pass # TODO add if relevant
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_tokenizer()
# Testing tokenization
__A ='''こんにちは、世界。 こんばんは、㔺界。'''
__A =['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
__A =tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids without special tokens
__A =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
__A =tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids with special tokens
__A =tokens + [tokenizer.unk_token]
__A =[0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9]
__A =tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_tokenizer()
# Testing tokenization
__A ='''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
__A ='''こんにちは、、、、世界。こんばんは、、、、世界。'''
__A =tokenizer.encode(lowercase__ )
__A =tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__A ='''こんにちは、世界。'''
__A ='''こんばんは、㔺界。😀'''
__A ='''こんにちは、世界。こんばんは、世界。😀'''
__A =tokenizer.encode(prefix_text + input_text )
__A =tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
__A =tokenizer.encode(lowercase__ , prefix_text=lowercase__ )
__A =tokenizer.decode(lowercase__ )
__A =tokenizer.decode(lowercase__ )
__A =tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
# Testing tokenization
__A ='''こんにちは、世界。'''
__A ='''こんばんは、㔺界。😀'''
__A =len(tokenizer.encode(lowercase__ ) ) - 2
__A =len(tokenizer.encode(lowercase__ ) ) - 2
__A =[1] + [0] * (len_prefix + len_text + 1)
__A =[1] * (len_prefix + len_text + 1) + [0]
__A =[1] + [1] * (len_prefix) + [0] * (len_text + 1)
__A =tokenizer(prefix_text + input_text ).token_type_ids
__A =tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
__A =tokenizer(lowercase__ , prefix_text=lowercase__ ).token_type_ids
self.assertListEqual(lowercase__ , lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
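    # In the three layouts above, token_type_ids mark the prefix part of the
    # prefix-LM input: 1 for prefix positions (including the leading special
    # token), 0 for the text to be completed. Passing everything as prefix_text
    # therefore yields all 1s except the final position.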
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__A =tokenizer.encode('''あンいワ''' )
__A =tokenizer.encode('''''' , prefix_text='''あンいワ''' )
__A =tokenizer.encode('''いワ''' , prefix_text='''あン''' )
self.assertEqual(tokenizer.decode(lowercase__ ) , tokenizer.decode(lowercase__ ) )
self.assertEqual(tokenizer.decode(lowercase__ ) , tokenizer.decode(lowercase__ ) )
self.assertNotEqual(lowercase__ , lowercase__ )
self.assertNotEqual(lowercase__ , lowercase__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
__A =[['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
__A =tokenizer(lowercase__ , padding=lowercase__ )
__A =tokenizer.batch_encode_plus(lowercase__ , padding=lowercase__ )
# fmt: off
__A =[[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]]
__A =[[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
__A =[[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase__ )
self.assertListEqual(x_token.token_type_ids , lowercase__ )
self.assertListEqual(x_token.attention_mask , lowercase__ )
self.assertListEqual(x_token_a.input_ids , lowercase__ )
self.assertListEqual(x_token_a.token_type_ids , lowercase__ )
self.assertListEqual(x_token_a.attention_mask , lowercase__ )
def __UpperCamelCase ( self ):
'''simple docstring'''
pass
def __UpperCamelCase ( self ):
'''simple docstring'''
pass
| 516
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : Dict = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ['''ViTFeatureExtractor''']
_lowerCamelCase : int = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[str] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
_lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
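# Note on the pattern above: at import time only the cheap configuration module
# and the _import_structure mapping are materialized; _LazyModule replaces the
# package module so heavy backends (torch, TF, Flax, vision) are imported on
# first attribute access. The TYPE_CHECKING branch mirrors the same names with
# real imports so static type checkers and IDEs still resolve them.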
| 516
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Dict ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
_snake_case : Optional[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'sshleifer/tiny-gpt2'
_snake_case : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Dict = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Tuple = 'sgugger/tiny-distilbert-classification'
_snake_case : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , only_pretrain_model=lowerCamelCase_ , )
_snake_case : Union[str, Any] = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Tuple = 'sshleifer/tiny-gpt2'
_snake_case : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , torchscript=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : List[str] = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = 'sshleifer/tiny-gpt2'
_snake_case : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , fpaa=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Union[str, Any] = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = 'sshleifer/tiny-gpt2'
_snake_case : int = AutoConfig.from_pretrained(lowerCamelCase_ )
# set architectures equal to `None`
_snake_case : Tuple = None
_snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Union[str, Any] = PyTorchBenchmark(lowerCamelCase_ , configs=[config] )
_snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Any = 'sshleifer/tiny-gpt2'
_snake_case : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Any = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : int = 'sshleifer/tiny-gpt2'
_snake_case : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCamelCase_ , multi_process=lowerCamelCase_ , )
_snake_case : List[Any] = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : List[Any] = 'sshleifer/tiny-gpt2'
_snake_case : int = AutoConfig.from_pretrained(lowerCamelCase_ )
_snake_case : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : str = PyTorchBenchmark(lowerCamelCase_ , configs=[config] )
_snake_case : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : List[str] = 'sshleifer/tinier_bart'
_snake_case : Optional[Any] = AutoConfig.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Optional[int] = PyTorchBenchmark(lowerCamelCase_ , configs=[config] )
_snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : List[Any] = 'sshleifer/tiny-gpt2'
_snake_case : str = AutoConfig.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Any = PyTorchBenchmark(lowerCamelCase_ , configs=[config] )
_snake_case : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Dict = 'sshleifer/tinier_bart'
_snake_case : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
_snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase_ , )
_snake_case : Dict = PyTorchBenchmark(lowerCamelCase_ , configs=[config] )
_snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , save_to_csv=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowerCamelCase_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowerCamelCase_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowerCamelCase_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowerCamelCase_ , 'env.csv' ) , multi_process=lowerCamelCase_ , )
_snake_case : List[str] = PyTorchBenchmark(lowerCamelCase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , 'env.csv' ) ).exists() )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowerCamelCase_ : str ):
self.assertTrue(hasattr(lowerCamelCase_ , 'sequential' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'cumulative' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'current' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase_ , inference=lowerCamelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase_ , 'log.txt' ) , log_print=lowerCamelCase_ , trace_memory_line_by_line=lowerCamelCase_ , multi_process=lowerCamelCase_ , )
_snake_case : Optional[Any] = PyTorchBenchmark(lowerCamelCase_ )
_snake_case : Tuple = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase_ , 'log.txt' ) ).exists() )
| 304
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
        prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 304
| 1
|
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Computes the pressure P = nRT / V of an ideal gas system."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Computes the volume V = nRT / P of an ideal gas system."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
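# Worked example: 1 mol of an ideal gas at 300 K confined to 0.0224 m^3 exerts
# pressure_of_gas_system(1, 300, 0.0224) = 1 * 300 * 8.314462 / 0.0224
# ≈ 111_354 Pa (about 1.1 atm), straight from P = nRT / V.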
| 589
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, q_groups=2, k_groups=2, v_groups=2, post_attention_groups=2, intermediate_groups=4, output_groups=1):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self) -> SqueezeBertConfig:
        """simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # attribute names below follow the common transformers test-mixin API
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 589
| 1
|
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))

        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )

            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assume that the output of `convert_tokens_to_string` is a string,
        # which is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
| 612
|
from __future__ import annotations
def __A(lowerCAmelCase , lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = sum(lowerCAmelCase )
create_state_space_tree(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return result
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> None:
"""simple docstring"""
if sum(lowerCAmelCase ) > max_sum or (remaining_nums_sum + sum(lowerCAmelCase )) < max_sum:
return
if sum(lowerCAmelCase ) == max_sum:
result.append(lowerCAmelCase )
return
for index in range(lowerCAmelCase , len(lowerCAmelCase ) ):
create_state_space_tree(
lowerCAmelCase , lowerCAmelCase , index + 1 , [*path, nums[index]] , lowerCAmelCase , remaining_nums_sum - nums[index] , )
lowerCamelCase__ = [3, 34, 4, 12, 5, 2]
lowerCamelCase__ = 9
lowerCamelCase__ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
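# Sanity check of the driver above: with nums = [3, 34, 4, 12, 5, 2] and max_sum = 9,
# the only qualifying subsets are [3, 4, 2] and [4, 5], so the script prints:
#   [3, 4, 2] [4, 5]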
| 612
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
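
# Illustrative sanity check (not part of the original module): the defaults above
# mirror the microsoft/cvt-13 architecture.
#   config = CvtConfig()
#   assert config.embed_dim == [64, 192, 384] and config.depth == [1, 2, 10]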
| 703
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 59
| 0
|
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : Optional[int] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 160
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 160
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
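
# Note on the lazy-import pattern above: at runtime the module object is swapped for a
# _LazyModule, so e.g. `from transformers.models.blip_2 import Blip2Model` resolves the
# torch-backed submodule only on first attribute access, keeping `import transformers` cheap.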
| 720
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
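
# Illustrative usage (not part of the original module): the defaults reproduce the
# google/fnet-base architecture; overrides are plain keyword arguments, e.g.
#   config = FNetConfig(num_hidden_layers=24, hidden_size=1024)  # fnet-large-sized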
| 190
| 0
|
'''simple docstring'''
def combination_util(arr, n, r, index, data, i):
    """Recursively fill ``data`` with combinations of size ``r`` drawn from ``arr``."""
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
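# Expected output: the C(5, 3) = 10 combinations, one per line, starting with
#   10 20 30
#   10 20 40
# and ending with
#   30 40 50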
| 71
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
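
# Example invocation (the checkpoint path is illustrative, matching the default above):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc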
| 158
| 0
|
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 705
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
a__ : List[str] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
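# --- illustrative sketch (editor's addition, not part of the original file) ---------
# downscale_height_and_width rounds the requested size up to the next multiple of
# scale_factor**2 (64 by default) and returns the matching latent-grid size already
# multiplied back by scale_factor. A quick self-check, assuming the function above:
if __name__ == "__main__":  # hypothetical sanity check, never run on import
    assert downscale_height_and_width(768, 768) == (96, 96)
    assert downscale_height_and_width(769, 768) == (104, 96)  # 769 rounds up one block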
class KandinskyVaaControlnetPipeline(DiffusionPipeline):
'''simple docstring'''
    def __init__(self, unet: UNetaDConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
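# --- illustrative sketch (editor's addition, not part of the pipeline module) -------
# Classifier-free guidance as applied inside __call__ above, shown in isolation.
# The tensor names are hypothetical; only the arithmetic mirrors the denoising loop.
def _cfg_example(noise_pred_uncond: torch.FloatTensor, noise_pred_text: torch.FloatTensor, guidance_scale: float) -> torch.FloatTensor:
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)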
| 570
| 0
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(A_ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
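# --- illustrative sketch (editor's addition, not part of the original script) -------
# Two queens at (r1, c1) and (r2, c2) share a diagonal exactly when
# r1 - c1 == r2 - c2 (45 degrees) or r1 + c1 == r2 + c2 (135 degrees), which is why
# the two collision lists above suffice. Quick check against the classic 4x4 result:
def _count_solutions(n: int) -> int:  # hypothetical helper name
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)


if __name__ == "__main__":
    assert _count_solutions(4) == 2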
| 68
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCamelCase : Union[str, Any] = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
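# --- illustrative sketch (editor's addition) -----------------------------------------
# dep_version_check forwards the pinned requirement string recorded in the deps table
# to require_version; the exact pin lives in dependency_versions_table.py.
def _example_dep_check() -> None:  # hypothetical helper
    dep_version_check("tqdm")  # raises if the installed tqdm misses the recorded pin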
| 170
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )
        return config, pixel_values, labels
    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values
        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)
        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")
        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
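# --- illustrative sketch (editor's addition, not part of the test file) --------------
# Minimal end-to-end use of the classification checkpoint exercised above, reusing the
# helpers defined in this file (network access and the COCO fixture are assumed):
def _example_beit_inference() -> int:  # hypothetical helper
    model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
    processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
    inputs = processor(images=prepare_img(), return_tensors="np")
    return model(**inputs).logits.argmax(-1).item()  # 281 == "tabby, tabby cat" on ImageNet-1k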
| 366
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2_0_0_0.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
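# --- illustrative sketch (editor's addition, not part of the module) -----------------
# FiLM (feature-wise linear modulation) in isolation: the conditioning embedding is
# projected to (scale, shift) halves and applied as x * (1 + scale) + shift, exactly
# as TaFiLMLayer.forward does above. The shapes below are hypothetical.
def _film_example() -> torch.Tensor:
    layer = TaFiLMLayer(in_features=8, out_features=4)
    x = torch.randn(2, 3, 4)             # (batch, seq, features)
    conditioning = torch.randn(2, 1, 8)  # (batch, 1, conditioning dim)
    return layer(x, conditioning)        # (2, 3, 4), modulated per feature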
| 366
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
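# --- illustrative sketch (editor's addition, not part of the init module) ------------
# With the _LazyModule registration above, heavy submodules import only on first
# attribute access, e.g. (assuming a transformers install that ships graphormer):
def _example_lazy_access():  # hypothetical helper
    from transformers.models.graphormer import GraphormerConfig  # triggers the real import
    return GraphormerConfig()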
| 547
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning, )
        return self.image_processor
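# --- illustrative sketch (editor's addition, not part of the module) ------------------
# Typical use of the processor defined above; the checkpoint name and the PIL image
# argument are assumptions for illustration:
def _example_processor_call(pil_image):  # hypothetical helper
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    enc = processor(text=["a cat"], images=[pil_image], return_tensors="pt")
    return enc["input_ids"], enc["pixel_values"]  # tokenized prompt + image tensor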
| 547
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs, ) -> str:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
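# --- illustrative sketch (editor's addition, not part of the config module) -----------
# With no arguments the config mirrors google/fnet-base (768 hidden, 12 layers, 32k
# vocab); overriding keyword arguments yields smaller variants, e.g. for unit tests:
def _example_tiny_config() -> FNetConfig:  # hypothetical helper
    return FNetConfig(hidden_size=32, num_hidden_layers=2)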
| 66
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None, ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_deberta_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @unittest.skip(reason='''Model not available yet''')
    def test_inference_masked_lm(self):
        '''simple docstring'''
        pass
@slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''')
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), F"""{output[:, 1:4, 1:4]}""")
| 66
| 1
|
import math
def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        message = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(message)
    if number < 1:
        message = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
try:
            value = proth(number)
except ValueError:
print(F'''ValueError: there is no {number}th Proth number''')
continue
print(F'''The {number}th Proth number: {value}''')
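# --- illustrative sketch (editor's addition, not part of the original script) --------
# Proth numbers have the form k * 2**n + 1 with odd k < 2**n; the list built above
# starts 3, 5, 9, 13, 17, 25, 33, ... so, for example:
if __name__ == "__main__":
    assert proth(5) == 17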
| 283
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """simple docstring"""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """simple docstring"""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """simple docstring"""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
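# --- illustrative sketch (editor's addition, not part of the module) ------------------
def _example_chained_table():  # hypothetical helper; assumes HashTable(size, charge_factor)
    table = HashTableWithLinkedList(3, charge_factor=3)
    for value in (10, 13, 16):  # 10 % 3 == 13 % 3 == 16 % 3, so all land in one bucket
        table.insert_data(value)
    return table.values  # the colliding values chain in a single deque (newest first)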
| 283
| 1
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None, metadata={
            """help""": (
                """The model checkpoint for weights initialization. Leave None if you want to train a model from"""
                """ scratch."""
            )
        } , )
    model_type: Optional[str] = field(
        default=None, metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(MODEL_TYPES)} , )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"""help""": """The input training data file (a text file)."""} )
    train_data_files: Optional[str] = field(
        default=None, metadata={
            """help""": (
                """The input training data files (multiple files in glob format). """
                """Very often splitting large files to smaller files can prevent tokenizer going out of memory"""
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"""help""": """An optional input train ref data file for whole word mask in Chinese."""} , )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"""help""": """An optional input eval ref data file for whole word mask in Chinese."""} , )
    line_by_line: bool = field(
        default=False, metadata={"""help""": """Whether distinct lines of text in the dataset are to be handled as distinct sequences."""} , )
    mlm: bool = field(
        default=False, metadata={"""help""": """Train with masked-language modeling loss instead of language modeling."""} )
    whole_word_mask: bool = field(default=False, metadata={"""help""": """Whether or not to use whole word mask."""} )
    mlm_probability: float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            """help""": (
                """Ratio of length of a span of masked tokens to surrounding context length for permutation language"""
                """ modeling."""
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={"""help""": """Maximum length of a span of masked tokens for permutation language modeling."""} )
    block_size: int = field(
        default=-1 , metadata={
            """help""": (
                """Optional input sequence length after tokenization."""
                """The training dataset will be truncated in block of this size for training."""
                """Default to the model max input length for single sentence inputs (take into account special tokens)."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
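# --- illustrative sketch (editor's addition) -------------------------------------------
# HfArgumentParser turns the dataclasses above into a CLI, so a typical fine-tuning run
# looks like this (script name and paths are placeholders):
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file train.txt \
#       --output_dir out --do_train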
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
) -> Tuple:
    '''simple docstring'''

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
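# --- illustrative sketch (editor's addition) --------------------------------------------
# Resolution order of get_dataset: the eval file (plus optional ref file) when
# evaluate=True, else a ConcatDataset over the --train_data_files glob, else the single
# --train_data_file. A hypothetical call from user code:
#   eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True)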
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
            """ script, save it, and load it from here, using --tokenizer_name""" )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the """
            """--mlm flag (masked language modeling).""" )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
_UpperCamelCase :Union[str, Any] =DataCollatorForPermutationLanguageModeling(
tokenizer=__a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCamelCase :int =DataCollatorForWholeWordMask(
tokenizer=__a , mlm_probability=data_args.mlm_probability )
else:
_UpperCamelCase :Tuple =DataCollatorForLanguageModeling(
tokenizer=__a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["""eval_loss"""] )
        result = {"""perplexity""": perplexity}
        output_eval_file = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
        if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key in sorted(result.keys() ):
                    logger.info(""" %s = %s""" , key , str(result[key] ) )
                    writer.write("""%s = %s\n""" % (key, str(result[key] )) )
        results.update(result )
    return results
def _lowerCAmelCase ( __a ) -> List[str]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
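# Hypothetical invocation (a sketch, not part of the original file: the script
# name and file paths are placeholders; the flags follow the dataclass fields
# and TrainingArguments used above):
#
#   python run_language_modeling.py \
#       --model_name_or_path roberta-base --mlm \
#       --train_data_file train.txt --eval_data_file eval.txt \
#       --output_dir ./lm-out --do_train --do_eval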
| 512
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCAmelCase = StableDiffusionXLImgaImgPipeline
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase = PipelineTesterMixin.required_optional_params - {"""latents"""}
__UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=True )
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=True )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs ( self , device , seed=0 ) -> str:
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase :Union[str, Any] =self.get_dummy_components()
_UpperCamelCase :int =StableDiffusionXLImgaImgPipeline(**lowerCAmelCase__ )
_UpperCamelCase :Optional[int] =sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCamelCase :Optional[int] =self.get_dummy_inputs(lowerCAmelCase__ )
_UpperCamelCase :Union[str, Any] =sd_pipe(**lowerCAmelCase__ ).images
_UpperCamelCase :Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase :int =np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop("""prompt""" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self , lowerCAmelCase__ , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=0 ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Tuple =torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
_UpperCamelCase :Union[str, Any] =np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_UpperCamelCase :Optional[int] =torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
_UpperCamelCase :Union[str, Any] ={
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 512
| 1
|
'''simple docstring'''
UpperCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 119
|
def A_ ( input_str ) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
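    # Minimal sanity checks for the bitmap-based uniqueness test above ("A_" is
    # the name this snippet gives the function):
    assert A_("abc")  # all characters distinct -> True
    assert not A_("abca")  # 'a' repeats, so its bit is already set -> False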
| 629
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def UpperCamelCase_( ) -> Any:
    plt.scatter(X , y , color='red' )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
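    # For reference (a worked expansion, not in the original file): with degree=4
    # and the default include_bias=True, poly_reg.fit_transform([[5.5]]) maps the
    # single feature to the monomial basis [1, 5.5, 5.5**2, 5.5**3, 5.5**4]
    # = [1.0, 5.5, 30.25, 166.375, 915.0625], which is what the linear model sees.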
| 354
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.0_2, num_labels=3, scope=None, encoder_stride=2, ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
def UpperCamelCase ( self) -> str:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def UpperCamelCase ( self, config, pixel_values, labels) -> Optional[int]:
        """simple docstring"""
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def UpperCamelCase ( self, config, pixel_values, labels) -> int:
        """simple docstring"""
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def UpperCamelCase ( self, config, pixel_values, labels) -> Any:
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
lowercase_ : Optional[Any] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[Any] = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ : Dict = False
lowercase_ : List[str] = False
lowercase_ : Union[str, Any] = False
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def UpperCamelCase ( self) -> str:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def UpperCamelCase ( self) -> int:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Any:
"""simple docstring"""
_lowercase : Dict = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
if not self.model_tester.is_training:
return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_lowercase : Dict = False
_lowercase : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
_lowercase : str = model_class(lowerCamelCase)
model.gradient_checkpointing_enable()
model.to(lowerCamelCase)
model.train()
_lowercase : Union[str, Any] = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
_lowercase : List[Any] = model(**lowerCamelCase).loss
loss.backward()
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}'''):
                config.problem_type = problem_type['title']
                config.num_labels = problem_type['num_labels']
                model = model_class(config)
                model.to(torch_device)
                model.train()
                inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                if problem_type["num_labels"] > 1:
                    inputs['labels'] = inputs['labels'].unsqueeze(1).repeat(1, problem_type['num_labels'])
                inputs['labels'] = inputs['labels'].to(problem_type['dtype'])
                # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                # they have the same size." which is a symptom that something is wrong with the regression problem.
                # See https://github.com/huggingface/transformers/issues/11780
                with warnings.catch_warnings(record=True) as warning_list:
                    loss = model(**inputs).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''')
loss.backward()
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img( ) -> List[str]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
        model = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
        model = DeiTModel.from_pretrained(
            'facebook/deit-base-distilled-patch16-224', torch_dtype=torch.floataa, device_map='auto')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 354
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Union[str, Any] = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[Any] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
snake_case_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
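# Note (added commentary, not from the original module): _LazyModule defers the
# heavy imports declared in _import_structure until an attribute is first
# accessed, so e.g.
#   from transformers.models.roformer import RoFormerModel
# only pulls in the torch-backed module at that point, keeping the initial
# `import transformers` cheap.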
| 691
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy( preds ,labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' ,training_args)
# Set seed
set_seed(training_args.seed)
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=config ,cache_dir=model_args.cache_dir ,)
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,tokenizer=tokenizer ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir ,tokenizer=tokenizer ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions ,axis=1)
        return {"acc": simple_accuracy(preds ,p.label_ids)}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,compute_metrics=compute_metrics ,data_collator=data_collator ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir ,'''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file ,'''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''' ,key ,value)
                    writer.write('''%s = %s\n''' % (key, value))
        results.update(result)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
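# Hypothetical launch (a sketch, not part of the original file: the script name
# and paths are placeholders; the flags follow the dataclasses above, and the
# task name must be a key of `processors`, e.g. "swag" in the upstream example):
#
#   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#       --data_dir ./swag --max_seq_length 128 --output_dir ./mc-out --do_train --do_eval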
| 691
| 1
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher ( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
'''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
    if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='''TPU''' )
        print(f"""Launching a training on {num_processes} TPU cores.""" )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
            # torch.distributed will expect a few environment variables to be set here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='''127.0.01''' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='''MULTI_GPU''' )
                print(f"""Launching training on {num_processes} GPUs.""" )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ['''PYTORCH_ENABLE_MPS_FALLBACK'''] = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
        function(*args )
def debug_launcher ( function , args=() , num_processes=2 ):
'''simple docstring'''
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be set here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='''fork''' )
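# Hypothetical usage sketch (added commentary; the training function below is a
# placeholder -- per the checks above, it must create its Accelerator inside
# itself rather than at notebook top level):
#
#   def training_loop(mixed_precision="fp16"):
#       ...  # build the Accelerator and the training step here
#
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)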
| 701
|
def xnor_gate ( input_a , input_b ):
    '''simple docstring'''
    return 1 if input_a == input_b else 0
def test_xnor_gate ( ):
    '''simple docstring'''
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
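# Equivalent bitwise formulation (an added sketch, not in the original file):
# for 0/1 inputs, XNOR is simply the complement of XOR.
def xnor_via_xor(input_a: int, input_b: int) -> int:
    return 1 - (input_a ^ input_b)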
| 452
| 0
|
'''simple docstring'''
import heapq
import sys
import numpy as np
__UpperCAmelCase = tuple[int, int]
class PriorityQueue :
'''simple docstring'''
    def __init__( self ) -> Union[str, Any]:
        self.elements = []
        self.set = set()
    def minkey( self ) -> Any:
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('''inf''' )
    def empty( self ) -> Union[str, Any]:
        return len(self.elements ) == 0
    def put( self , item , priority ) -> Optional[Any]:
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri , x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri , x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ) -> Tuple:
        if item in self.set:
            self.set.remove(item )
        temp = []
        (pro , x) = heapq.heappop(self.elements )
        while x != item:
            temp.append((pro, x) )
            (pro , x) = heapq.heappop(self.elements )
        for prito, yyy in temp:
            heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ) -> int:
        return self.elements[0][1]
    def get( self ) -> Any:
        (priority , item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
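# Minimal usage sketch of the priority queue above (added commentary):
#   q = PriorityQueue()
#   q.put((0, 0), 5.0)
#   q.put((1, 1), 2.0)
#   assert q.minkey() == 2.0        # smallest priority in the heap
#   assert q.top_show() == (1, 1)   # the item carrying that priority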
def consistent_heuristic ( P , goal ) -> Tuple:
    # euclidean distance
    a = np.array(P )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_2 ( P , goal ) -> List[str]:
    # integer division by time variable
    return consistent_heuristic(P , goal ) // t
def heuristic_1 ( P , goal ) -> List[Any]:
    # manhattan distance
    return abs(P[0] - goal[0] ) + abs(P[1] - goal[1] )
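# Worked example (an added note): for P = (0, 0) and goal = (3, 4) the Euclidean
# heuristic gives sqrt(3**2 + 4**2) = 5.0 while the Manhattan heuristic gives
# |0-3| + |0-4| = 7; on a 4-connected unit-cost grid both are admissible, with
# Manhattan the tighter bound.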
def key ( start , i , goal , g_function ) -> str:
    ans = g_function[start] + W1 * heuristics[i](start , goal )
    return ans
def do_something ( back_pointer , goal , start ) -> int:
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''
    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c , y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''
    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=''' ''' )
                print('''<-- End position''' , end=''' ''' )
            else:
                print(grid[i][j] , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid ( p ) -> Any:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state ( s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ) -> str:
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x , y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )
                if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                        if neighbours not in close_list_inad:
                            for var in range(1 , n_heuristic ):
                                if key(neighbours , var , goal , g_function ) <= W2 * key(
                                    neighbours , 0 , goal , g_function ):
                                    open_list[j].put(
                                        neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground ( ) -> str:
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star ( start , goal , n_heuristic ) -> Dict:
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 90
|
from ..utils import DummyObject, requires_backends
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Union[str, Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : str = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Dict , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Any , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[Any] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Dict = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *UpperCAmelCase : str , **UpperCAmelCase : int ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : List[str] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : int , **UpperCAmelCase : List[Any] ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
class _a ( metaclass=snake_case_ ):
"""simple docstring"""
_lowerCamelCase : int = ['torch', 'transformers', 'onnx']
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Optional[Any] ):
requires_backends(self , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
@classmethod
def __A ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
requires_backends(cls , ["torch", "transformers", "onnx"] )
| 86
| 0
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_ ( state_dict ):
    """simple docstring"""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk ( checkpoint_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    args = Namespace(**checkpoint["cfg"]["model"] )
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowerCamelCase = parser.parse_args()
_lowerCamelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
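A hedged sanity check after running the conversion; the checkpoint path is a placeholder, and the tokenizer id assumes the public 564M XGLM model is the one being converted.

from transformers import XGLMTokenizer

model = convert_fairseq_xglm_checkpoint_from_disk("model.pt")  # placeholder path
tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
inputs = tokenizer("Hello, my name is", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))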
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width of a processed image: resize the
        shortest edge to `size` (capped at 1333/800 * size), then round both
        dimensions down to a multiple of `size_divisor`.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
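A short, hedged usage sketch of the processor under test on a synthetic image; the constructor arguments mirror the tester defaults above.

import numpy as np
from PIL import Image
from transformers import BridgeTowerImageProcessor

processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32)
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # (1, 3, H, W), with H and W multiples of size_divisor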
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
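The `_LazyModule` indirection means the heavy submodules (and their torch or TensorFlow dependencies) are only imported when one of the registered names is first accessed. A hedged sketch of the effect:

import transformers.models.xlnet as xlnet_module

config_cls = xlnet_module.XLNetConfig  # triggers the real import of configuration_xlnet
# xlnet_module.XLNetModel would likewise import modeling_xlnet (and torch) on demand.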
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    Args:
        predicted_image_embedding (`torch.FloatTensor` of shape `(batch_size, embedding_dim)`):
            The predicted CLIP image embedding conditioned on the CLIP text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """
    The prior transformer from unCLIP: given a noised CLIP image embedding, a timestep, and a CLIP
    text embedding, predict the denoised CLIP image embedding.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns a dictionary of all attention processors used in the model, indexed by weight name.
        """
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use: either one processor for all layers, or a dict keyed by weight name.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
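A minimal forward-pass sketch of the prior. The tiny configuration below is an assumption made purely for a quick, untrained smoke test, not a realistic setting:

# Hedged smoke test with a deliberately tiny, untrained configuration.
import torch

prior = PriorTransformer(
    num_attention_heads=2,
    attention_head_dim=8,
    num_layers=2,
    embedding_dim=16,
    num_embeddings=4,
    additional_embeddings=4,
)
hidden_states = torch.randn(1, 16)             # noised image embedding
proj_embedding = torch.randn(1, 16)            # CLIP text embedding
encoder_hidden_states = torch.randn(1, 4, 16)  # text encoder hidden states
out = prior(hidden_states, timestep=1, proj_embedding=proj_embedding, encoder_hidden_states=encoder_hidden_states)
print(out.predicted_image_embedding.shape)  # torch.Size([1, 16])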
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints into a flax state dict."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename PT weight names to corresponding Flax weight names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Check whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
# convert pytorch tensor to numpy
__lowerCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__lowerCAmelCase = flax_model.params["""params"""]
else:
__lowerCAmelCase = flax_model.params
__lowerCAmelCase = flatten_dict(_lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCAmelCase = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__lowerCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCAmelCase = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__lowerCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCAmelCase , __lowerCAmelCase = rename_key_and_reshape_tensor(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# add model prefix if necessary
__lowerCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
import torch
# Load the index
__lowerCAmelCase = {}
for shard_file in shard_filenames:
# load using msgpack utils
__lowerCAmelCase = torch.load(_lowerCAmelCase )
__lowerCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
__lowerCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__lowerCAmelCase = flax_model.params["""params"""]
__lowerCAmelCase = flatten_dict(_lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
__lowerCAmelCase = flax_model.params
__lowerCAmelCase = flatten_dict(_lowerCAmelCase )
__lowerCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
__lowerCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowerCAmelCase = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
__lowerCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
__lowerCAmelCase , __lowerCAmelCase = rename_key_and_reshape_tensor(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# add model prefix if necessary
__lowerCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__lowerCAmelCase = jnp.asarray(_lowerCAmelCase )
return unflatten_dict(_lowerCAmelCase )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )
__lowerCAmelCase = flatten_dict(_lowerCAmelCase )
__lowerCAmelCase = pt_model.state_dict()
__lowerCAmelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
__lowerCAmelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__lowerCAmelCase = []
__lowerCAmelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__lowerCAmelCase = flax_key_tuple[0] == pt_model.base_model_prefix
__lowerCAmelCase = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__lowerCAmelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__lowerCAmelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# conv layer
__lowerCAmelCase = flax_key_tuple[:-1] + ("""weight""",)
__lowerCAmelCase = jnp.transpose(_lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ) not in pt_model_dict:
# linear layer
__lowerCAmelCase = flax_key_tuple[:-1] + ("""weight""",)
__lowerCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__lowerCAmelCase = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__lowerCAmelCase = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
__lowerCAmelCase = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
__lowerCAmelCase = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__lowerCAmelCase = """.""".join(_lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__lowerCAmelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__lowerCAmelCase = key.split(""".""" )
__lowerCAmelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__lowerCAmelCase = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
__lowerCAmelCase = key_components[-2] + """_v"""
if name is not None:
__lowerCAmelCase = key_components[:-3] + [name]
__lowerCAmelCase = """.""".join(_lowerCAmelCase )
__lowerCAmelCase = key
if flax_key in special_pt_names:
__lowerCAmelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
__lowerCAmelCase = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase , np.ndarray ) else flax_tensor
__lowerCAmelCase = torch.from_numpy(_lowerCAmelCase )
# remove from missing keys
missing_keys.remove(_lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCAmelCase )
pt_model.load_state_dict(_lowerCAmelCase )
# re-transform missing_keys to list
__lowerCAmelCase = list(_lowerCAmelCase )
if len(_lowerCAmelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(_lowerCAmelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
""" use it for predictions and inference.""" )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"""If your task is similar to the task the model of the checkpoint was trained on, """
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
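In practice these helpers are reached through the `from_pt=True` flag on Flax model classes; a hedged illustration with the public BERT checkpoint:

from transformers import FlaxBertModel

# Downloads the PyTorch weights and converts them to a Flax parameter tree on load.
flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)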
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Two-pointer check: walk inward from both ends until the pointers meet."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Index-based check over the first half of the string."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Recursive check that strips one character from each end per call."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Slice-based check: a string is a palindrome iff it equals its reverse."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
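Since all four variants must agree on every input, a quick randomized cross-check (standard library only; function names as defined above) is a cheap extra validation:

import random
import string

for _ in range(100):
    s = "".join(random.choices(string.ascii_lowercase, k=random.randint(0, 10)))
    s = random.choice([s, s + s[::-1]])  # occasionally force an even-length palindrome
    results = {is_palindrome(s), is_palindrome_traversal(s), is_palindrome_recursive(s), is_palindrome_slice(s)}
    assert len(results) == 1, s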
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """
    Feature extractor for EnCodec: pads or truncates raw audio and builds the
    `input_values`/`padding_mask` model inputs.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
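A hedged usage sketch with synthetic mono audio at the extractor's default 24 kHz rate:

import numpy as np

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
raw_audio = np.random.randn(24_000).astype(np.float32)  # one second of noise
inputs = feature_extractor(raw_audio, sampling_rate=24_000, return_tensors="pt")
print(inputs["input_values"].shape)  # (batch, channels, samples)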
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training, inference, and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""

    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the self-training loop, alternating pseudo-labeling and fine-tuning."""
    accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
a__ : int = STModelArguments(model_name_or_path=__a )
a__ : Optional[int] = STDataArguments(train_file=__a , infer_file=__a )
a__ : List[Any] = STTrainingArguments(output_dir=__a )
a__ : Union[str, Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__a ).items():
setattr(__a , __a , __a )
for key, value in kwargs.items():
if hasattr(__a , __a ):
setattr(__a , __a , __a )
# Sanity checks
a__ : List[Any] = {}
a__ : Optional[Any] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
a__ : Union[str, Any] = args.train_file
a__ : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
a__ : Tuple = args.eval_file
for key in data_files:
a__ : Optional[Any] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
a__ : List[Any] = extension
else:
assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
a__ : Any = f'''{args.output_dir}/self-train_iter-{{}}'''.format
a__ : List[str] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=__a )
os.makedirs(__a , exist_ok=__a )
accelerator.wait_for_everyone()
a__ : Optional[int] = None
a__ : str = None
a__ : List[Any] = 0
a__ : List[Any] = False
# Show the progress bar
a__ : Any = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
a__ : Optional[int] = data_dir_format(__a )
assert os.path.exists(__a )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
a__ : Union[str, Any] = os.path.join(__a , "stage-1" )
a__ : str = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__a , __a ):
arguments_dict.update({key: value} )
a__ : Tuple = os.path.join(__a , "best-checkpoint" , __a )
if os.path.exists(__a ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , __a , __a , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , __a )
finetune(**__a )
accelerator.wait_for_everyone()
assert os.path.exists(__a )
logger.info("Self-training job completed: iteration: %d, stage: 1." , __a )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
a__ : Any = os.path.join(__a , "best-checkpoint" )
a__ : Optional[int] = os.path.join(__a , "stage-2" )
# Update arguments_dict
a__ : Union[str, Any] = model_path
a__ : Union[str, Any] = data_files["train"]
a__ : Optional[Any] = current_output_dir
a__ : Optional[int] = os.path.join(__a , "best-checkpoint" , __a )
if os.path.exists(__a ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , __a , __a , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , __a )
finetune(**__a )
accelerator.wait_for_everyone()
assert os.path.exists(__a )
logger.info("Self-training job completed: iteration: %d, stage: 2." , __a )
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)
# Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(args.output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(args.output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , __a )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__a , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(__a , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__a , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(__a , "eval_results_best-iteration.json" ) , )
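
# --- Editor's note: the early-stopping bookkeeping in the loop above is easy
# to get wrong, so here is a minimal, self-contained sketch of the same logic.
# `patience` and `threshold` are hypothetical stand-ins for
# args.early_stopping_patience and args.early_stopping_threshold.
def should_stop_early(eval_history, patience=3, threshold=0.0):
    best = eval_history[0]
    counter = 0
    for result in eval_history[1:]:
        if result - best > threshold:
            # a real improvement resets the patience counter
            best, counter = result, 0
        else:
            counter += 1
            if counter >= patience:
                return True
    return False


assert should_stop_early([0.80, 0.81, 0.81, 0.81, 0.81], patience=3) is True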
| 151
| 0
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
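

# --- Editor's note: a quick usage sketch for inverse_of_matrix above. For
# [[2, 5], [2, 0]] the determinant is -10, so the inverse is
# 1/-10 * [[0, -5], [-2, 2]] = [[0.0, 0.5], [0.2, -0.2]].
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]]))  # [[0.0, 0.5], [0.2, -0.2]]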
| 110
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 250
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=SCREAMING_SNAKE_CASE__ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
"""KDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
"""KCrossAttnDownBlock2D""",
) , in_channels=8 , mid_block_type=SCREAMING_SNAKE_CASE__ , only_cross_attention=SCREAMING_SNAKE_CASE__ , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
        vae = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
"""DownEncoderBlock2D""",
] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
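

# --- Editor's note: a minimal sketch of the two-stage pattern the slow tests
# above exercise (base pipeline producing latents, then the x2 latent upscaler).
# It assumes a CUDA GPU and network access; model ids are the ones from the tests.
def upscale_prompt(prompt, seed=0):
    generator = torch.manual_seed(seed)
    base = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
    ).to("cuda")
    upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    # stage 1: generate in latent space instead of decoding to pixels
    latents = base(prompt, generator=generator, output_type="latent").images
    # stage 2: upscale the latents 2x and decode
    return upscaler(
        prompt=prompt,
        image=latents,
        num_inference_steps=20,
        guidance_scale=0,
        generator=generator,
        output_type="np",
    ).images[0]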
| 545
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
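

# --- Editor's note: a brief usage sketch for the restored classes above; the
# attribute_map means hidden_size and num_attention_heads alias d_model and
# encoder_attention_heads respectively.
if __name__ == "__main__":
    config = DetrConfig()
    assert config.hidden_size == config.d_model == 256
    assert config.num_attention_heads == config.encoder_attention_heads == 8
    onnx_config = DetrOnnxConfig(config)
    print(list(onnx_config.inputs))  # ['pixel_values', 'pixel_mask']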
| 545
| 1
|
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
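
# --- Editor's note: a tiny illustration of two helpers re-exported above.
# patch_environment temporarily sets (upper-cased) environment variables and
# restores the previous state on exit; MY_FLAG is a hypothetical variable name.
#
#     set_seed(42)
#     with patch_environment(my_flag="1"):
#         assert os.environ["MY_FLAG"] == "1"
#     assert "MY_FLAG" not in os.environ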
| 78
|
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
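

# --- Editor's note: a sequential reference implementation of odd-even
# transposition sort, useful for sanity-checking the multiprocessing version
# above without spawning processes.
def odd_even_transposition_sequential(arr):
    n = len(arr)
    for phase in range(n):
        # even phases compare (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))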
| 67
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""ConditionalDetrFeatureExtractor"""]
_lowercase = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 427
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"
    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 427
| 1
|
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers"""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
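
# --- Editor's note: typical invocation (paths are placeholders; the script
# filename is assumed):
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/biogpt_dump \
#       --pytorch_dump_folder_path /path/to/output
#
# or, programmatically:
#
#   convert_biogpt_checkpoint_to_pytorch("/path/to/biogpt_dump", "/path/to/output")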
| 226
|
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 528
| 0
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    This class stores the first and second signal and performs the circular
    convolution.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
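
    # --- Editor's note: a quick usage sketch; with the default signals
    # [2, 1, 2, -1] and [1, 2, 3, 4] the circular convolution works out to
    # [10.0, 10.0, 6.0, 14.0] (e.g. y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10).
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]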
| 710
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 296
| 0
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map
def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_segmentation_maps(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 2_5_5 )
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
| 317
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Return the optimal value for the current player (the maximizer moves first).

    >>> minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34423], 3)
    65
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
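# Worked trace for the scores above (tree of height 3, maximizer moves first):
#   depth 2 (max): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#   depth 1 (min): min(90, 33)=33, min(65, 34423)=65
#   depth 0 (max): max(33, 65)=65  -> the printed optimal value is 65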
| 317
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)
    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )
    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )
    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
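# For reference, a minimal sketch of the label-encoding step used in tokenize()
# (hypothetical label strings - the real names come from the "complexity" column):
#
#   labels = ClassLabel(num_classes=3, names=["constant", "linear", "quadratic"])
#   labels.str2int("linear")                 # -> 1
#   labels.str2int(["linear", "constant"])   # -> [1, 0]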
| 704
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w") as fp:
    fp.write(json.dumps(vocab_tokens))
with open(self.merges_file, "w") as fp:
    fp.write("\n".join(merges))
def get_input_output_texts(self, tokenizer):
    input_text = "lower newer"
    output_text = "lower newer"
    return input_text, output_text
def test_full_tokenizer(self):
    tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
    text = "lower"
    bpe_tokens = ["low", "er</w>"]
    tokens = tokenizer.tokenize(text)
    self.assertListEqual(tokens, bpe_tokens)
    input_tokens = tokens + ["<unk>"]
    input_bpe_tokens = [14, 15, 20]
    self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
    tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
    text = tokenizer.encode("sequence builders", add_special_tokens=False)
    text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
    assert encoded_sentence == [0] + text + [1]
    assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 97
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
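#
# As an illustration (not part of this tool's code), the cartesian product described
# above is what itertools.product computes:
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']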
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """A helper class to tee print's output into a file."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
return F"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 166
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
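# A minimal usage sketch (hypothetical feature set, not part of this module):
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now carries the 2 class names
#   task.column_mapping  # -> {"image": "image", "labels": "labels"}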
| 716
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)

        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
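# A minimal shape-check sketch (hypothetical values, assuming the repaired names above):
#
#   model = FlaxControlNetModel(sample_size=32, in_channels=4)
#   params = model.init_weights(jax.random.PRNGKey(0))
#   # init_weights probes with sample (1, 4, 32, 32), timesteps (1,),
#   # encoder_hidden_states (1, 1, 1280) and controlnet_cond (1, 3, 256, 256).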
| 590
| 0
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
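# A minimal usage sketch (hypothetical checkpoint/dataset names, not from this script);
# the streamed examples must be dicts with a "content" key:
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   stream = load_dataset("codeparrot/codeparrot-clean-valid", split="train", streaming=True)
#   packed = ConstantLengthDataset(tokenizer, stream, seq_length=1024)
#   batch = next(iter(DataLoader(packed, batch_size=8)))  # tensor of shape (8, 1024)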
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 210
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 210
| 1
|
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def test_gradient_accumulator_distribution_strategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 545
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
UpperCAmelCase__ : Any = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 545
| 1
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
UpperCamelCase__ : str = logging.get_logger(__name__)
UpperCamelCase__ : int = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
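# For reference, how the feed_forward_proj string is parsed in __init__ above
# (a small illustration, not part of the config itself):
#
#   "gated-gelu".split("-")  # -> ["gated", "gelu"]: is_gated_act=True, dense_act_fn="gelu_new"
#   "relu".split("-")        # -> ["relu"]:          is_gated_act=False, dense_act_fn="relu"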
| 105
|
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 141
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 549
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
lowerCamelCase__ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
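Build is O(n) and both update and query_range are O(log n); the class works with any associative binary function, e.g. a range-minimum query (a sketch using the SegmentTree defined above):

# rmq = SegmentTree([2, 1, 5, 3, 4], min)
# rmq.query_range(0, 4)   -> 1
# rmq.update(1, 9)
# rmq.query_range(0, 4)   -> 2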
| 549
| 1
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque of values; newest values go to the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
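A usage sketch (hypothetical: it assumes the HashTable base class from .hash_table exposes insert_data(), values, _keys, charge_factor and size_table, which this subclass relies on):

# ht = HashTableWithLinkedList(size_table=3, charge_factor=2)
# ht.insert_data(17)
# ht.insert_data(20)   # colliding keys accumulate in a deque, newest first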
| 272
|
import os
import sys


SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)


from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
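This is the torch.hub entry-point pattern (hubconf-style): each wrapper above becomes loadable by name. A consumer-side sketch, assuming this file sits at the root of the huggingface/transformers GitHub repo:

# import torch
# tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-cased")
# model = torch.hub.load("huggingface/transformers", "model", "bert-base-cased")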
| 272
| 1
|
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"{solution(10) = }")
| 486
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs)
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
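The accumulation pattern used above, shown in isolation (a minimal sketch with placeholder model/optimizer/dataloader, not this script's actual objects):

# from accelerate import Accelerator
#
# accelerator = Accelerator(gradient_accumulation_steps=4)
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
# for batch in dataloader:
#     with accelerator.accumulate(model):   # gradient sync and step happen every 4 micro-batches
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()
#
# Launched with e.g.: accelerate launch this_script.py --gradient_accumulation_steps 4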
| 486
| 1
|
import collections
import os
import re
from pathlib import Path
_UpperCamelCase = "src/transformers"
# Matches is_xxx_available()
_UpperCamelCase = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_UpperCamelCase = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_UpperCamelCase = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_UpperCamelCase = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_UpperCamelCase = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_UpperCamelCase = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_UpperCamelCase = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_UpperCamelCase = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_UpperCamelCase = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_UpperCamelCase = re.compile(r"^\s*try:")
# Catches a line with else:
_UpperCamelCase = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse, per backend, the `_import_structure` and `TYPE_CHECKING` objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between the _import_structure objects and the TYPE_CHECKING objects of an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check that all inits define the same objects in both halves, raising with the failures otherwise."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check that every submodule is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r'import_structure\["([^"]*)"\]', init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
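For context, the init layout the checks above expect looks like this (a toy sketch, not a real transformers init):

# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]
#
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         from .modeling_foo import FooModel
#
# parse_init() must recover {"none": ["FooConfig"], "torch": ["FooModel"]} from both halves.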
| 492
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum of all numbers that can be written as the sum of fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
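A quick sanity check of the property being searched for:

# 4150 is one such number: 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150
assert digits_fifth_powers_sum(4150) == 4150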
| 492
| 1
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def a__ ( self: Any , __a: Dict[str, torch.Tensor] )-> Dict[str, List[str]]:
lowerCamelCase : List[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(__a , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
lowerCamelCase : Dict = True
return readable_batch
def a__ ( self: List[str] , __a: Any , **__a: Any )-> List[Any]:
return self.model(__a , **__a )
def a__ ( self: int , __a: List[int] )-> str:
lowerCamelCase : Union[str, Any] = self.tokenizer.batch_decode(
__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
return lmap(str.strip , __a )
def a__ ( self: Union[str, Any] , __a: dict )-> Tuple:
lowerCamelCase : Any = self.tokenizer.pad_token_id
lowerCamelCase , lowerCamelCase : Optional[int] = batch["""input_ids"""], batch["""attention_mask"""]
lowerCamelCase : Optional[Any] = batch["""labels"""]
if isinstance(self.model , __a ):
lowerCamelCase : Optional[int] = self.model._shift_right(__a )
else:
lowerCamelCase : List[str] = shift_tokens_right(__a , __a )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
lowerCamelCase : Tuple = decoder_input_ids
self.save_readable_batch(__a )
lowerCamelCase : Optional[Any] = self(__a , attention_mask=__a , decoder_input_ids=__a , use_cache=__a )
lowerCamelCase : Union[str, Any] = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
lowerCamelCase : int = nn.CrossEntropyLoss(ignore_index=__a )
assert lm_logits.shape[-1] == self.vocab_size
lowerCamelCase : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
lowerCamelCase : Union[str, Any] = nn.functional.log_softmax(__a , dim=-1 )
lowerCamelCase , lowerCamelCase : List[Any] = label_smoothed_nll_loss(
__a , __a , self.hparams.label_smoothing , ignore_index=__a )
return (loss,)
@property
def a__ ( self: List[str] )-> int:
return self.tokenizer.pad_token_id
def a__ ( self: str , __a: Optional[Any] , __a: Dict )-> Dict:
lowerCamelCase : Union[str, Any] = self._step(__a )
lowerCamelCase : Optional[int] = dict(zip(self.loss_names , __a ) )
# tokens per batch
lowerCamelCase : int = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
lowerCamelCase : int = batch["""input_ids"""].shape[0]
lowerCamelCase : int = batch["""input_ids"""].eq(self.pad ).sum()
lowerCamelCase : List[str] = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def a__ ( self: int , __a: List[str] , __a: Any )-> Dict:
return self._generative_step(__a )
def a__ ( self: Any , __a: Any , __a: Any="val" )-> Dict:
self.step_count += 1
lowerCamelCase : Dict = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCamelCase : str = losses["""loss"""]
lowerCamelCase : int = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
lowerCamelCase : Optional[int] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCamelCase : torch.FloatTensor = torch.tensor(__a ).type_as(__a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(__a )
lowerCamelCase : str = {f'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCamelCase : int = self.step_count
self.metrics[prefix].append(__a ) # callback writes this to self.metrics_save_path
lowerCamelCase : Union[str, Any] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'{prefix}_loss': loss,
f'{prefix}_{self.val_metric}': metric_tensor,
}
def a__ ( self: List[str] , __a: List[Any] , __a: Union[str, Any] )-> Dict:
return calculate_rouge(__a , __a )
def a__ ( self: Union[str, Any] , __a: dict )-> dict:
lowerCamelCase : Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
lowerCamelCase : Any = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=__a , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
lowerCamelCase : str = (time.time() - ta) / batch["""input_ids"""].shape[0]
lowerCamelCase : List[str] = self.ids_to_clean_text(__a )
lowerCamelCase : List[str] = self.ids_to_clean_text(batch["""labels"""] )
lowerCamelCase : str = self._step(__a )
lowerCamelCase : Union[str, Any] = dict(zip(self.loss_names , __a ) )
lowerCamelCase : Dict = self.calc_generative_metrics(__a , __a )
lowerCamelCase : List[str] = np.mean(lmap(__a , __a ) )
base_metrics.update(gen_time=__a , gen_len=__a , preds=__a , target=__a , **__a )
return base_metrics
def a__ ( self: List[str] , __a: Union[str, Any] , __a: Dict )-> Any:
return self._generative_step(__a )
def a__ ( self: Any , __a: Optional[int] )-> List[str]:
return self.validation_epoch_end(__a , prefix="""test""" )
def a__ ( self: str , __a: int )-> SeqaSeqDataset:
lowerCamelCase : Union[str, Any] = self.n_obs[type_path]
lowerCamelCase : int = self.target_lens[type_path]
lowerCamelCase : Union[str, Any] = self.dataset_class(
self.tokenizer , type_path=__a , n_obs=__a , max_target_length=__a , **self.dataset_kwargs , )
return dataset
def a__ ( self: Optional[int] , __a: str , __a: int , __a: bool = False )-> DataLoader:
lowerCamelCase : List[str] = self.get_dataset(__a )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCamelCase : Union[str, Any] = dataset.make_sortish_sampler(__a , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCamelCase : List[Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
__a , batch_sampler=__a , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
__a , batch_size=__a , collate_fn=dataset.collate_fn , shuffle=__a , num_workers=self.num_workers , sampler=__a , )
def a__ ( self: Optional[int] )-> DataLoader:
lowerCamelCase : Tuple = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=__a )
return dataloader
def a__ ( self: List[str] )-> DataLoader:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def a__ ( self: Dict )-> DataLoader:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def a__ ( __a: str , __a: Optional[int] )-> Dict:
BaseTransformer.add_model_specific_args(__a , __a )
add_generic_args(__a , __a )
parser.add_argument(
"""--max_source_length""" , default=1_024 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=__a , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=__a )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=__a )
parser.add_argument("""--max_tokens_per_batch""" , type=__a , default=__a )
parser.add_argument("""--logger_name""" , type=__a , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=__a , default=-1 , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=__a , default=500 , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=__a , default=-1 , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=__a , default="""summarization""" , required=__a , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=__a , default=0.0 , required=__a )
parser.add_argument("""--src_lang""" , type=__a , default="""""" , required=__a )
parser.add_argument("""--tgt_lang""" , type=__a , default="""""" , required=__a )
parser.add_argument("""--eval_beams""" , type=__a , default=__a , required=__a )
parser.add_argument(
"""--val_metric""" , type=__a , default=__a , required=__a , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=__a , default=__a , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=__a , default=1 , required=__a , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=__a , default=-1 , required=__a , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
def __init__( self: Dict , __a: Union[str, Any] , **__a: Any )-> Union[str, Any]:
super().__init__(__a , **__a )
lowerCamelCase : List[str] = hparams.src_lang
lowerCamelCase : List[Any] = hparams.tgt_lang
def a__ ( self: List[str] , __a: Dict , __a: Dict )-> dict:
return calculate_bleu(__a , __a )
def main(args, model=None) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=UpperCamelCase__ )
check_output_dir(UpperCamelCase__ , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCamelCase : SummarizationModule = SummarizationModule(UpperCamelCase__ )
else:
lowerCamelCase : SummarizationModule = TranslationModule(UpperCamelCase__ )
lowerCamelCase : str = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
lowerCamelCase : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase : Dict = os.environ.get("""WANDB_PROJECT""" , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = WandbLogger(name=model.output_dir.name , project=UpperCamelCase__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCamelCase : Tuple = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' )
if args.early_stopping_patience >= 0:
lowerCamelCase : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowerCamelCase : int = False
lowerCamelCase : Tuple = args.val_metric == """loss"""
lowerCamelCase : pl.Trainer = generic_train(
UpperCamelCase__ , UpperCamelCase__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , UpperCamelCase__ ) , early_stopping_callback=UpperCamelCase__ , logger=UpperCamelCase__ , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__lowerCamelCase :Optional[Any] = argparse.ArgumentParser()
__lowerCamelCase :int = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase :Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase :Any = parser.parse_args()
main(args)
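A typical invocation sketch (illustrative flag values; --data_dir, --output_dir, --model_name_or_path, --do_train, --do_predict and --gpus come from the generic args in lightning_base, the rest from add_model_specific_args above):

# python finetune.py \
#     --data_dir ./cnn_dm --output_dir ./out \
#     --model_name_or_path t5-small --task summarization \
#     --max_source_length 1024 --max_target_length 56 \
#     --do_train --do_predict --gpus 1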
| 42
|
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase__ ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
print(solution())
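The four scans above cover every direction (left/up products duplicate right/down ones); an equivalent direction-vector formulation, as a sketch:

# best = 0
# for di, dj in [(0, 1), (1, 0), (1, 1), (1, -1)]:  # right, down, two diagonals
#     for i in range(20):
#         for j in range(20):
#             if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
#                 p = grid[i][j] * grid[i + di][j + dj] * grid[i + 2 * di][j + 2 * dj] * grid[i + 3 * di][j + 3 * dj]
#                 best = max(best, p)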
| 42
| 1
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the two input values."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
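The full truth table can also be generated instead of hand-asserted (a sketch; prints on import, so kept commented):

# from itertools import product
# for a, b in product((0, 1), repeat=2):
#     print(a, b, "->", or_gate(a, b))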
| 280
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
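Because _LazyModule replaces the module in sys.modules, the heavy submodules above are only imported on first attribute access. Observable behaviour, as a sketch:

# import transformers.models.plbart as plbart   # cheap: nothing torch-side imported yet
# plbart.PLBartForConditionalGeneration          # first access triggers the real import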
| 280
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single frame, a single video, or a batch of videos into a batch of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True,
                 crop_size: Dict[str, int] = None, do_rescale: bool = True,
                 rescale_factor: Union[int, float] = 1 / 255, offset: bool = True, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BILINEAR,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True,
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Cast to float and, when `offset` is set, shift values before rescaling (behavior kept from the original).
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                          resample: PILImageResampling = None, do_center_crop: bool = None,
                          crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None,
                          offset: bool = None, do_normalize: bool = None,
                          image_mean: Optional[Union[float, List[float]]] = None,
                          image_std: Optional[Union[float, List[float]]] = None,
                          data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                   resample: PILImageResampling = None, do_center_crop: bool = None,
                   crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None,
                   offset: bool = None, do_normalize: bool = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
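A usage sketch for the processor above (illustrative; `VivitImageProcessor` is the name restored in this edit, and the input is random frames):

# import numpy as np
# processor = VivitImageProcessor()
# video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
# out = processor(video, return_tensors="np")   # BaseImageProcessor.__call__ dispatches to preprocess
# out["pixel_values"].shape                     # (1, 8, 3, 224, 224) with the defaults above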
| 708
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to make sure padding is actually exercised.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now, this only runs on exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now, this only runs on exactly two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
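These checks only exercise the interesting paths under multiple processes; a launch sketch (the file name is assumed):

# accelerate launch --num_processes 2 test_ops.py
# With a single process, gather/broadcast degenerate to identity, and the
# reduce tests return early since they require exactly two processes.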
| 385
| 0
|
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
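Note that the loop above performs len(data) random pair swaps, which is a naive shuffle rather than the classic algorithm (and is not guaranteed to sample permutations uniformly). The canonical Fisher-Yates walks the list once; a sketch:

def classic_fisher_yates(data: list) -> list:
    # Swap each position with a uniformly chosen index at or before it.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # 0 <= j <= i
        data[i], data[j] = data[j], data[i]
    return data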
| 447
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
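
# Hedged usage note (added): with the `_LazyModule` indirection above, heavy
# submodules are imported only on first attribute access. The import path
# below is the real transformers location; instantiating the config needs no torch.
def _demo_lazy_import():
    from transformers.models.autoformer import AutoformerConfig  # triggers the real import

    return AutoformerConfig()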
| 99
| 0
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so colliding entries chain onto the same key.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
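
# Hedged usage sketch (added): assumes the sibling `HashTable` base class from
# the same package exposes `insert_data` and a `values` list, as in its home repo.
def _demo_chained_table():
    table = HashTableWithLinkedList(3, charge_factor=2)
    for value in (10, 20, 30, 40):
        table.insert_data(value)
    return table.values  # each occupied slot is a deque of colliding entries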
| 513
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
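
# Minimal standalone sketch (added) of the denoising loop the tests above
# exercise; the random tensor stands in for a trained UNet's noise prediction.
def _demo_ddpm_loop():
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 32, 32)
    for t in scheduler.timesteps:
        residual = torch.randn_like(sample)  # placeholder for a model output
        sample = scheduler.step(residual, t, sample).prev_sample
    return sample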
| 513
| 1
|
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 175
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True when the longest side is shorter than the sum of the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
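
# Quick sanity checks (added) for the restored `check_polygon` above.
assert check_polygon([6, 10, 5])  # 10 < 6 + 5, a valid triangle
assert not check_polygon([3, 7, 13, 2])  # 13 is not shorter than 3 + 7 + 2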
| 175
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_400,
        n_positions=2_048,
        n_ctx=2_048,
        n_embd=4_096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
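
# Hedged sketch (added): build a tiny config and inspect the ONNX wrapper's
# declared inputs; both classes are the ones restored above.
def _demo_codegen_onnx_config():
    config = CodeGenConfig(n_layer=2, n_head=4, n_embd=128)
    onnx_config = CodeGenOnnxConfig(config)
    return onnx_config.inputs  # OrderedDict of input name -> dynamic axes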
| 708
|
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive `size`-sized tuples from `seq`."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the message, drop non-letters, and pad repeats with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
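
# Quick demonstration (added) of the restored cipher round-trip; note that the
# X padding inserted by `prepare_input` survives decoding.
if __name__ == "__main__":
    secret = encode("hide the gold", "monarchy")
    print(secret)
    print(decode(secret, "monarchy"))  # HIDETHEGOLDX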
| 279
| 0
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
UpperCAmelCase_ = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        second_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 603
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_longformer_fast'] = ['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_longformer'] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_longformer'] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 603
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
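
# Hedged sketch (added): `inputs_to_logits_ratio` above is the product of the
# conv strides, i.e. how many waveform samples collapse into one logit frame.
def _demo_unispeech_sat_config():
    config = UniSpeechSatConfig(num_hidden_layers=2, hidden_size=128)
    return config.inputs_to_logits_ratio  # 5*2*2*2*2*2*2 == 320 by default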
| 709
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 171
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
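
# Hedged usage sketch (added): the column names below are illustrative; the
# template simply maps them onto the canonical text/summary schema.
def _demo_summarization_template():
    template = Summarization(text_column="article", summary_column="highlights")
    return template.column_mapping  # {'article': 'text', 'highlights': 'summary'}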
| 91
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 91
| 1
|
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Translate a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 704
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
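
# Tiny standalone sketch (added) of the factory the tests above exercise.
def _demo_get_activation():
    act = get_activation("gelu_new")
    return act(torch.tensor([0.0, 1.0, 2.0]))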
| 70
| 0
|
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """Load the weights from a TensorFlow 2.x checkpoint into a PyTorch BertModel."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {full_name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm (note: unreachable, shadowed by the identical
                # `elif` above; kept to mirror the upstream script)
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 501
|
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1024,
}
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = collections.OrderedDict()
with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as reader:
lowerCamelCase_ : Tuple = reader.readlines()
for index, token in enumerate(__UpperCAmelCase ):
lowerCamelCase_ : str = token.rstrip('''\n''' )
lowerCamelCase_ : Any = index
return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first wordpiece tokenization of a single token."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba word segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence-control ids first."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an id (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # restore the on-disk names of the space and newline tokens before writing
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
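
# --- illustrative usage (added, hedged) ---------------------------------------
# Round-trip a sentence through the tokenizer above. The checkpoint name comes
# from the pretrained map at the top of this file; an installed `jieba` backend
# and network access to fetch the vocab are assumptions of this sketch.
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# ids = tokenizer.encode("今天天气真好")   # jieba segmentation, then wordpiece
# print(ids)                               # begins with the <s> id (see build_inputs_with_special_tokens)
# print(tokenizer.decode(ids))             # _decode strips pad/bos/eos ids before joining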
| 501
| 1
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
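
# --- illustrative usage (added sketch) -----------------------------------------
# Programmatic round-trip with a throwaway README instead of the CLI above.
#
# import tempfile
#
# tmp_readme = Path(tempfile.mkdtemp()) / "README.md"
# tmp_readme.write_text("---\nlicense: mit\n---\n# My dataset\n", encoding="utf-8")
# metadata = DatasetMetadata.from_readme(tmp_readme)
# metadata["pretty_name"] = "My dataset"   # plain dict semantics
# metadata.to_readme(tmp_readme)           # rewrites only the YAML front-matter block
# print(tmp_readme.read_text())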
| 719
|
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count all distinct paths from the top-left to the bottom-right corner of
    `grid`, moving one cell up/down/left/right at a time, never revisiting a
    cell, and never stepping on a cell containing 1 (an obstacle)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
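
# --- worked example (added) -----------------------------------------------------
# A 4x4 grid chosen for illustration: 0 = free cell, 1 = obstacle. Exactly two
# obstacle-free, non-revisiting paths connect the corners (verifiable by hand:
# both pass through (0, 1) and (0, 2), and differ only in a detour through
# (0, 3)/(1, 3) before descending the third column).
maze = [
    [0, 0, 0, 0],
    [1, 1, 0, 0],
    [0, 0, 0, 1],
    [0, 1, 0, 0],
]
print(depth_first_search(maze, 0, 0, set()))  # -> 2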
| 428
| 0
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the glue/mrpc dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
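
# --- usage notes (added, hedged) -----------------------------------------------
# This example is normally started through the Accelerate launcher; the file
# name below is a placeholder for wherever you saved this script:
#
#   accelerate launch multi_process_metrics.py --mixed_precision fp16
#
# Gradient-accumulation arithmetic used in `training_function`, standalone:
# with the default config (batch_size=16) nothing changes, but a hypothetical
# batch_size of 64 on a GPU gives
#
#   gradient_accumulation_steps = 64 // MAX_GPU_BATCH_SIZE   # -> 4
#   batch_size = MAX_GPU_BATCH_SIZE                          # -> 16
#
# so each optimizer step accumulates 4 micro-batches of 16 samples.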
| 236
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Writes a basic single-machine cluster config to `save_location`; returns the path on success, False if a
    config already exists there."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
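
# --- illustrative usage (added, hedged) ---------------------------------------
# Calling the helper directly instead of via `accelerate config default`.
# The target path is a throwaway temp location chosen for this sketch.
#
# import json
# import tempfile
# from pathlib import Path
#
# target = Path(tempfile.mkdtemp()) / "default_config.json"
# written = write_basic_config(mixed_precision="no", save_location=str(target))
# if written:
#     print(json.loads(target.read_text())["compute_environment"])   # LOCAL_MACHINE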
| 125
| 0
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
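
# --- illustrative usage (added sketch) ------------------------------------------
# Running the scheduler outside the test harness with a toy "denoiser". The
# zero-prediction model is purely illustrative; a real pipeline would call a
# trained UNet here. Kept as a comment so test collection stays side-effect free.
#
# import torch
# from diffusers import DDIMParallelScheduler
#
# scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.timesteps:
#     noise_pred = torch.zeros_like(sample)      # stand-in for model(sample, t)
#     sample = scheduler.step(noise_pred, t, sample, 0.0).prev_sample
# print(sample.shape)                            # torch.Size([1, 3, 8, 8])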
| 716
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is [eos] or [pad], else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos; no bos token is added to the front."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
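
# --- illustrative usage (added, hedged) ---------------------------------------
# Encoding with the fast tokenizer defined above. The checkpoint name is the
# one in PRETRAINED_VOCAB_FILES_MAP; downloading it (network access) is an
# assumption of this sketch.
#
# from transformers import PegasusTokenizerFast
#
# tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
# enc = tok("PEGASUS was pre-trained with gap-sentence generation.")
# print(enc.input_ids[-1] == tok.eos_token_id)   # True: EOS appended, no BOS
# print(tok.decode(enc.input_ids, skip_special_tokens=True))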
| 402
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]

        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]

        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
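
# --- illustrative usage (added, hedged) ---------------------------------------
# The translation setup these tests exercise, in a few lines. Downloading the
# "uclanlp/plbart-python-en_XX" checkpoint is an assumption of this sketch.
#
# from transformers import PLBartTokenizer
#
# tok = PLBartTokenizer.from_pretrained(
#     "uclanlp/plbart-python-en_XX", language_codes="base", src_lang="python", tgt_lang="en_XX"
# )
# enc = tok("def add(a,b):NEW_LINE_INDENTreturn a+b", return_tensors="pt")
# # Source sequences end with [EOS, __python__]; compare the suffix_tokens check above.
# print(enc.input_ids[0][-2:].tolist())   # [2, 50002]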
| 5
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
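
# --- illustrative usage (added, hedged) ---------------------------------------
# The pattern under test: one CLIPProcessor call yields both text and image
# tensors. Using the public "openai/clip-vit-base-patch32" checkpoint is an
# assumption here (the tests above build a tiny local processor instead).
#
# import numpy as np
# from PIL import Image
# from transformers import CLIPProcessor
#
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
# batch = processor(text=["a photo of a cat"], images=[image], return_tensors="pt", padding=True)
# print(sorted(batch.keys()))   # ['attention_mask', 'input_ids', 'pixel_values']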
| 71
| 0
|
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def _UpperCamelCase ( self : Tuple , a : Any , a : List[str] = None , a : Optional[int] = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(A_ )) + [1]
return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def _UpperCamelCase ( self : Union[str, Any] , a : Tuple ):
"""simple docstring"""
if len(A_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _UpperCamelCase ( self : Dict , a : int , a : Optional[int] = None ):
"""simple docstring"""
__snake_case : str =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _UpperCamelCase ( self : Optional[Any] , a : Optional[int] , a : str = None ):
"""simple docstring"""
__snake_case : Any =self._add_eos_if_not_present(A_ )
if token_ids_a is None:
return token_ids_a
else:
__snake_case : int =self._add_eos_if_not_present(A_ )
return token_ids_a + token_ids_a
def _UpperCamelCase ( self : Union[str, Any] , a : Union[str, Any] ):
"""simple docstring"""
__snake_case : Dict =[chr(A_ ) for i in text.encode('''utf-8''' )]
return tokens
def _UpperCamelCase ( self : int , a : Optional[int] ):
"""simple docstring"""
if token in self.special_tokens_encoder:
__snake_case : Union[str, Any] =self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__snake_case : Dict =self.added_tokens_encoder[token]
elif len(A_ ) != 1:
__snake_case : Optional[Any] =self.unk_token_id
else:
__snake_case : int =ord(A_ ) + self._num_special_tokens
return token_id
def _UpperCamelCase ( self : List[Any] , a : Any ):
"""simple docstring"""
if index in self.special_tokens_decoder:
__snake_case : int =self.special_tokens_decoder[index]
else:
__snake_case : Optional[Any] =chr(index - self._num_special_tokens )
return token
def _UpperCamelCase ( self : str , a : Union[str, Any] ):
"""simple docstring"""
__snake_case : Any =b''''''
for token in tokens:
if token in self.special_tokens_decoder:
__snake_case : Any =self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.added_tokens_decoder:
__snake_case : List[str] =self.special_tokens_decoder[token].encode('''utf-8''' )
elif token in self.special_tokens_encoder:
__snake_case : str =token.encode('''utf-8''' )
elif token in self.added_tokens_encoder:
__snake_case : List[Any] =token.encode('''utf-8''' )
else:
__snake_case : Any =bytes([ord(A_ )] )
bstring += tok_string
__snake_case : List[str] =bstring.decode('''utf-8''' , errors='''ignore''' )
return string
def _UpperCamelCase ( self : str , a : List[Any] , a : Dict = None ):
"""simple docstring"""
return ()
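

# Standalone check of the id arithmetic above (no tokenizer object needed):
# every UTF-8 byte maps to ord(byte) + 3, because pad/eos/unk occupy ids 0-2.
if __name__ == "__main__":
    ids = [b + 3 for b in "hi".encode("utf-8")]
    assert ids == [107, 108]  # ord("h") + 3 == 107, ord("i") + 3 == 108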
| 703
|
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class _lowercase :
def __init__( self : Optional[Any] , a : List[str] , a : List[Any]=1_3 , a : Any=7 , a : List[str]=True , a : int=True , a : Optional[int]=9_9 , a : List[Any]=3_2 , a : Optional[int]=5 , a : List[Any]=4 , a : Dict=3_7 , a : List[str]="gelu" , a : Union[str, Any]=0.1 , a : str=0.1 , a : List[str]=5_0 , a : Tuple=0.0_2 , a : Union[str, Any]=True , a : int=None , ):
"""simple docstring"""
__snake_case : Optional[int] =parent
__snake_case : Dict =batch_size
__snake_case : Union[str, Any] =seq_length
__snake_case : Optional[Any] =is_training
__snake_case : Any =use_input_mask
__snake_case : Union[str, Any] =vocab_size
__snake_case : Union[str, Any] =hidden_size
__snake_case : int =num_hidden_layers
__snake_case : Optional[int] =num_attention_heads
__snake_case : int =intermediate_size
__snake_case : str =hidden_act
__snake_case : str =hidden_dropout_prob
__snake_case : Tuple =attention_probs_dropout_prob
__snake_case : Tuple =max_position_embeddings
__snake_case : Optional[Any] =initializer_range
__snake_case : List[str] =use_labels
__snake_case : List[Any] =scope
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[int] =None
if self.use_input_mask:
__snake_case : Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__snake_case : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Any =self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=a , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : Optional[int] , a : int , a : Tuple , **a : Any , ):
"""simple docstring"""
__snake_case : List[Any] =BertGenerationEncoder(config=a )
model.to(a )
model.eval()
__snake_case : Dict =model(a , attention_mask=a )
__snake_case : Tuple =model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Tuple , a : Union[str, Any] , a : Optional[Any] , a : str , a : List[Any] , a : Optional[int] , a : Optional[Any] , **a : int , ):
"""simple docstring"""
__snake_case : Any =True
__snake_case : List[str] =BertGenerationEncoder(config=a )
model.to(a )
model.eval()
__snake_case : Union[str, Any] =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
__snake_case : str =model(
a , attention_mask=a , encoder_hidden_states=a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Any , a : List[str] , a : Dict , a : Optional[Any] , a : Tuple , a : Dict , a : Tuple , **a : Tuple , ):
"""simple docstring"""
__snake_case : List[Any] =True
__snake_case : Any =True
__snake_case : int =BertGenerationDecoder(config=a ).to(a ).eval()
# first forward pass
__snake_case : List[str] =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
__snake_case : Optional[int] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : List[str] =ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : List[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[Any] =torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : Tuple =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['''hidden_states'''][0]
__snake_case : Any =model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['''hidden_states'''][0]
# select random slice
__snake_case : int =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : int =output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : int =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , a : List[Any] , a : Dict , a : Optional[Any] , a : List[str] , *a : Dict , ):
"""simple docstring"""
__snake_case : List[str] =BertGenerationDecoder(a )
model.to(a )
model.eval()
__snake_case : Any =model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_a : List[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
_a : Union[str, Any] = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case : int =BertGenerationEncoderTester(self )
__snake_case : Union[str, Any] =ConfigTester(self , config_class=a , hidden_size=3_7 )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = '''bert'''
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Dict =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
def _UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
__snake_case : Tuple =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : Optional[int] =BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
self.assertIsNotNone(a )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
__snake_case : List[Any] =BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__snake_case : Any =torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
__snake_case : str =model(a )[0]
__snake_case : List[str] =torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , a )
__snake_case : int =torch.tensor(
[[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Any ):
"""simple docstring"""
__snake_case : int =BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
__snake_case : Dict =torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
__snake_case : List[str] =model(a )[0]
__snake_case : str =torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , a )
__snake_case : Optional[int] =torch.tensor(
[[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
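

# A standalone sketch of the cached-vs-uncached comparison pattern used in
# create_and_check_decoder_model_past_large_inputs above (pure torch, no
# model; the two tensors below are stand-ins for the hidden-state outputs):
if __name__ == "__main__":
    output_from_no_past = torch.randn(2, 10, 8)     # full-sequence forward
    output_from_past = output_from_no_past[:, -3:]  # incremental forward over the last 3 tokens
    random_slice_idx = torch.randint(0, 8, (1,)).item()
    assert torch.allclose(
        output_from_no_past[:, -3:, random_slice_idx], output_from_past[:, :, random_slice_idx], atol=1e-3
    )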
| 497
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
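

# The block above implements lazy imports: nothing under `modeling_xmod` is
# actually imported until an attribute is first accessed. A minimal sketch of
# the same idea via PEP 562's module-level __getattr__ (illustrative only;
# the real machinery lives in transformers' _LazyModule):
#
# import importlib
#
# def __getattr__(name):
#     for module_name, exports in _import_structure.items():
#         if name in exports:
#             return getattr(importlib.import_module("." + module_name, __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")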
| 452
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8) , 'Stack'.center(print_width) , 'Postfix'.center(print_width) , sep=' | ' , )
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8) , (''.join(stack)).ljust(print_width) , (''.join(post_fix)).ljust(print_width) , sep=' | ' , )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8) , (''.join(stack)).ljust(print_width) , (''.join(post_fix)).ljust(print_width) , sep=' | ' , )  # Output in tabular format
    return ''.join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ')'  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("""\nEnter an Infix Equation = """)  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
| 364
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Any:
lowercase__ : Tuple = parent
lowercase__ : Tuple = batch_size
lowercase__ : Tuple = is_training
lowercase__ : int = use_auxiliary_loss
lowercase__ : Dict = num_queries
lowercase__ : Optional[Any] = num_channels
lowercase__ : Dict = min_size
lowercase__ : Dict = max_size
lowercase__ : Optional[Any] = num_labels
lowercase__ : Union[str, Any] = hidden_dim
lowercase__ : Tuple = hidden_dim
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
lowercase__ : int = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
lowercase__ : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
lowercase__ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
lowercase__ : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : int = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowercase__ : Tuple = self.num_queries
lowercase__ : Optional[Any] = self.num_labels
lowercase__ : List[Any] = [1, 1, 1, 1]
lowercase__ : Tuple = self.num_channels
lowercase__ : Any = 64
lowercase__ : Union[str, Any] = 128
lowercase__ : List[str] = self.hidden_dim
lowercase__ : List[Any] = self.hidden_dim
lowercase__ : List[str] = self.hidden_dim
return config
def UpperCAmelCase__( self ) -> Any:
        config , pixel_values , pixel_mask , mask_labels , class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : Optional[int] = output.encoder_hidden_states
lowercase__ : Tuple = output.pixel_decoder_hidden_states
lowercase__ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[int]:
with torch.no_grad():
lowercase__ : List[str] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ : List[str] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
lowercase__ : List[Any] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
lowercase__ : Optional[int] = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase__ : Optional[int] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
lowercase__ : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
lowercase__ : int = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_a : List[str] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[Any] = False
_a : Dict = False
_a : List[str] = False
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Dict = MaskaFormerModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCAmelCase__( self ) -> str:
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCAmelCase__( self ) -> Any:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCAmelCase__( self ) -> str:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCAmelCase__( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCAmelCase__( self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase__( self ) -> Union[str, Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__( self ) -> str:
pass
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(lowerCamelCase__ )
lowercase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def UpperCAmelCase__( self ) -> int:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase__ : Dict = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : Optional[int] = (self.model_tester.min_size,) * 2
lowercase__ : Optional[Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"""class_labels""": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
lowercase__ : Optional[Any] = self.model_tester.get_config()
lowercase__ : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def UpperCAmelCase__( self ) -> int:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
lowercase__ : Tuple = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__( self ) -> str:
if not self.model_tester.is_training:
return
lowercase__ : Tuple = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowercase__ : List[str] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
lowercase__ : List[str] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.all_model_classes[1]
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
lowercase__ : int = True
lowercase__ : Optional[int] = True
lowercase__ : Tuple = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
lowercase__ : int = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
lowercase__ : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__ : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase__ : List[str] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__ : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1E-4


def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__( self ) -> Dict:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCAmelCase__( self ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Dict = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
lowercase__ : Dict = self.default_image_processor
lowercase__ : List[Any] = prepare_img()
lowercase__ : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
lowercase__ : Tuple = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase__ : Optional[Any] = model(**lowerCamelCase__ )
        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice_hidden_state , atol=TOLERANCE ) )
def UpperCAmelCase__( self ) -> Any:
lowercase__ : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = prepare_img()
lowercase__ : Tuple = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
lowercase__ : Tuple = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
lowercase__ : Any = model(**lowerCamelCase__ )
# masks_queries_logits
lowercase__ : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
# class_queries_logits
lowercase__ : int = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def UpperCAmelCase__( self ) -> int:
lowercase__ : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
lowercase__ : int = self.default_image_processor
lowercase__ : List[str] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowercase__ : Union[str, Any] = inputs["""pixel_values"""].to(lowerCamelCase__ )
lowercase__ : str = [el.to(lowerCamelCase__ ) for el in inputs["""mask_labels"""]]
lowercase__ : Dict = [el.to(lowerCamelCase__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowercase__ : List[Any] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
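

# Shape arithmetic behind the assertions above, as a standalone sketch: the
# mask logits come out at 1/4 of the input resolution, so a 384x384 input
# with `num_queries` queries (value below is illustrative) yields
# (batch, num_queries, 96, 96).
if __name__ == "__main__":
    batch_size, num_queries, height, width = 1, 100, 384, 384
    assert (batch_size, num_queries, height // 4, width // 4) == (1, 100, 96, 96)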
| 128
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 128
| 1
|
from PIL import Image
def change_brightness(img: Image , level: float ) -> Image:
    def brightness(c: int ) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
    brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
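
    # The point map simplifies to c + level, since 128 + level + (c - 128)
    # == c + level; a quick self-contained check with a synthetic image:
    gray = Image.new("L", (2, 2), color=50)
    assert change_brightness(gray, 100).getpixel((0, 0)) == 150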
| 108
|
def greatest_common_divisor(x: int , y: int ) -> int:
    return x if y == 0 else greatest_common_divisor(y , x % y )


def lcm(x: int , y: int ) -> int:
    return (x * y) // greatest_common_divisor(x , y )


def solution(n: int = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 521
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ =logging.get_logger(__name__)
lowerCAmelCase__ ={"vocab_file": "vocab.json", "merges_file": "merges.txt"}
lowerCAmelCase__ ={
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
lowerCAmelCase__ ={
"allenai/longformer-base-4096": 4_096,
"allenai/longformer-large-4096": 4_096,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )


def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
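

# Quick checks for the two helpers above, run only when executed directly:
# bytes_to_unicode remaps unprintable bytes past 255, so the space byte
# (0x20) becomes chr(0x120) == "Ġ", the familiar GPT-2/RoBERTa marker, and
# get_pairs returns the set of adjacent symbol pairs that BPE merges over.
if __name__ == "__main__":
    assert bytes_to_unicode()[ord(" ")] == chr(0x120)
    assert get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}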
class A__( __magic_name__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict="replace" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Dict="</s>" , __SCREAMING_SNAKE_CASE : Dict="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : str="<unk>" , __SCREAMING_SNAKE_CASE : Tuple="<pad>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="<mask>" , __SCREAMING_SNAKE_CASE : str=False , **__SCREAMING_SNAKE_CASE : int , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else bos_token
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else eos_token
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else sep_token
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cls_token
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else unk_token
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as vocab_handle:
__SCREAMING_SNAKE_CASE = json.load(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE = bytes_to_unicode()
__SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()}
with open(__SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as merges_handle:
__SCREAMING_SNAKE_CASE = merges_handle.read().split('''\n''' )[1:-1]
__SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
return len(self.encoder )
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE = tuple(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = get_pairs(__SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = bigram
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
while i < len(__SCREAMING_SNAKE_CASE ):
try:
__SCREAMING_SNAKE_CASE = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE = j
if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE = tuple(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = new_word
if len(__SCREAMING_SNAKE_CASE ) == 1:
break
else:
__SCREAMING_SNAKE_CASE = get_pairs(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = ''' '''.join(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = word
return word
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for token in re.findall(self.pat , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__SCREAMING_SNAKE_CASE ).split(''' ''' ) )
return bpe_tokens
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
"""simple docstring"""
return self.decoder.get(__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ''''''.join(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__SCREAMING_SNAKE_CASE = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__SCREAMING_SNAKE_CASE = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + '''\n''' )
__SCREAMING_SNAKE_CASE = 0
with open(__SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__SCREAMING_SNAKE_CASE = token_index
writer.write(''' '''.join(__SCREAMING_SNAKE_CASE ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , **__SCREAMING_SNAKE_CASE : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE = ''' ''' + text
return (text, kwargs)
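

# Special-token layout produced by build_inputs_with_special_tokens above
# (RoBERTa-style, which Longformer inherits):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s> </s> B </s>
# so create_token_type_ids_from_sequences returns all zeros for both cases.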
| 701
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__:
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=10 , __SCREAMING_SNAKE_CASE : str=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Optional[int]=[1, 1, 2, 1] , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]="relu" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embeddings_size
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
def _a ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _a ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFRegNetModel(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFRegNetForImageClassification(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class A__( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowerCAmelCase = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFRegNetModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE )
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def _a ( self : Dict ) -> List[Any]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
def _a ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ):
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , training=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__SCREAMING_SNAKE_CASE = layer_type
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    def test_model_outputs_equivalence( self ) -> List[str]:
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"""
                        ) , )

            recursive_check(tuple_output , dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'''output_hidden_states''': True} )
    def test_for_image_classification( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Dict:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class A__( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ) -> Any:
        """simple docstring"""
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )

        # forward pass
        outputs = model(**inputs , training=False )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
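# --------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the same inference flow as
# the slow test above, written as plain usage. The checkpoint id is whatever
# TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to; since this downloads
# weights, it is left as a commented example.
#
#   model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   processor = AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs, training=False).logits
#   predicted_class = int(tf.math.argmax(logits, axis=-1)[0])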
| 690
| 0
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> None:
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """_float_tensor""",
        """decoder.output_projection.weight""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def make_linear_from_emb(emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False
) -> MBartForConditionalGeneration:
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = """relu"""

    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
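# Example invocation of this converter (script filename and paths are
# hypothetical; only --hf_config values that exist on the Hub will resolve):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned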
| 213
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__( self , vocab_size=32000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ) -> None:
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
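# Illustrative use of the validation above (values are examples, not tied to
# any released checkpoint); import LlamaConfig through the transformers package
# rather than executing this module directly:
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # accepted
#   config = LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})    # raises ValueError (bad type)
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})   # raises ValueError (factor <= 1)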
| 159
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """simple docstring"""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create( cls ):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput ):
    """simple docstring"""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin , ConfigMixin ):
    """simple docstring"""
    @property
    def has_state( self ):
        return True

    @register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 100 , s_noise = 1.007 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ):
        pass

    def create_state( self ):
        return KarrasVeSchedulerState.create()
    def set_timesteps( self , state , num_inference_steps , shape = () ):
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
    def add_noise_to_input( self , state , sample , sigma , key , ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step( self , state , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def step_correct( self , state , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )

    def add_noise( self , state , original_samples , noise , timesteps ):
        raise NotImplementedError()
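# Sketch of the predictor/corrector sampling loop this scheduler supports,
# assuming a Flax noise model `model_fn(sample, sigma)`; this is illustrative
# pseudocode, not an exported API of this module. `sigma_prev` would be the
# next value in `state.schedule`.
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for sigma, sigma_prev in zip(state.schedule[:-1], state.schedule[1:]):
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = model_fn(sample_hat, sigma_hat)
#       out = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)
#       sample = out.prev_sample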
| 704
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '\\n\n'
_DESCRIPTION = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n            This includes models such as gpt2, causal variations of bert,\n            causal versions of t5, and more (the full list can be found\n            in the AutoModelForCausalLM documentation here:\n            https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        78.22\n        >>> print(round(results["perplexities"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric("perplexity")\n        >>> input_texts = datasets.load_dataset("wikitext",\n        ...                                     "wikitext-2-raw-v1",\n        ...                                     split="test")["text"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!=\'\']\n        >>> results = perplexity.compute(model_id=\'gpt2\',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        [\'perplexities\', \'mean_perplexity\']\n        >>> print(round(results["mean_perplexity"], 2))\n        60.35\n        >>> print(round(results["perplexities"][0], 2))\n        81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric ):
    """simple docstring"""

    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) ,reference_urls=['https://huggingface.co/docs/transformers/perplexity'] ,)
    def _compute( self , input_texts , model_id , batch_size = 16 , add_start_token = True , device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'

        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )

        tokenizer = AutoTokenizer.from_pretrained(model_id )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors='pt' , return_attention_mask=True , ).to(device )

        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none' )

        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 220
| 0
|
import pprint
import requests
API_ENDPOINT_URL = """https://zenquotes.io/api"""


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '/today' ).json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '/random' ).json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 235
|
def longest_distance(graph ) -> None:
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )

    # compute the in-degree of every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # start from all vertices with in-degree zero
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )

    # process vertices in topological order (Kahn's algorithm), relaxing the
    # longest-path estimate of every successor
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1

            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1

            if indegree[x] == 0:
                queue.append(x )

    print(max(long_dist ) )


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)  # prints 5 for this DAG (e.g. 0 -> 2 -> 5 -> 6 -> 7)
| 235
| 1
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image( image ) -> torch.Tensor:
    """simple docstring"""
    warnings.warn(
        '''The preprocess method is deprecated and will be removed in a future version. Please'''
        ''' use VaeImageProcessor.preprocess instead''' , FutureWarning , )
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]

    if isinstance(image[0] , PIL.Image.Image ):
        w , h = image[0].size
        w , h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def _preprocess_mask( mask ) -> torch.Tensor:
    """simple docstring"""
    if isinstance(mask , torch.Tensor ):
        return mask
    elif isinstance(mask , PIL.Image.Image ):
        mask = [mask]

    if isinstance(mask[0] , PIL.Image.Image ):
        w , h = mask[0].size
        w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
        mask = np.concatenate(mask , axis=0 )
        mask = mask.astype(np.float32 ) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask )
    elif isinstance(mask[0] , torch.Tensor ):
        mask = torch.cat(mask , dim=0 )
    return mask
class RePaintPipeline(DiffusionPipeline ):
    '''simple docstring'''

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__( self , unet , scheduler ) ->None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
@torch.no_grad()
    def __call__( self , image: Union[torch.Tensor, PIL.Image.Image] , mask_image: Union[torch.Tensor, PIL.Image.Image] , num_inference_steps: int = 250 , eta: float = 0.0 , jump_length: int = 10 , jump_n_sample: int = 10 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        original_image = image
        original_image = _preprocess_image(original_image )
        original_image = original_image.to(device=self.device , dtype=self.unet.dtype )
        mask_image = _preprocess_mask(mask_image )
        mask_image = mask_image.to(device=self.device , dtype=self.unet.dtype )

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )

        image_shape = original_image.shape
        image = randn_tensor(image_shape , generator=generator , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(num_inference_steps , jump_length , jump_n_sample , self.device )
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator , list ) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image , t ).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output , t , image , original_image , mask_image , generator ).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image , t_last , generator )
            t_last = t

        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image )
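# Minimal usage sketch (illustrative; the checkpoint id and file names below
# are assumptions, not pinned by this module):
#
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   out = pipe(image=original_pil_image, mask_image=mask_pil_image,
#              num_inference_steps=250, jump_length=10, jump_n_sample=10)
#   out.images[0].save("inpainted.png")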
| 177
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        } , )
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: str = field(
        default=None , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    language: str = field(
        default=None , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
    train_language: Optional[str] = field(
        default=None , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def main() -> None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''' , model_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features['''label'''].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features['''label'''].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features['''label'''].names

    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''] , examples['''hypothesis'''] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
    # Get the metric function
    metric = evaluate.load('''xnli''' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )

        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info('''*** Predict ***''' )
        predictions , labels , metrics = trainer.predict(predict_dataset , metric_key_prefix='''predict''' )

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics['''predict_samples'''] = min(max_predict_samples , len(predict_dataset ) )

        trainer.log_metrics('''predict''' , metrics )
        trainer.save_metrics('''predict''' , metrics )

        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 177
| 1
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize , sigma , theta , lambd , gamma , psi ) -> np.ndarray:
    """simple docstring"""
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )

    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
    img = imread('''../image_data/lena.jpg''')

    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow('''Original''', gray)
    imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
    waitKey(0)
waitKey(0)
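# For reference, the kernel built above is the real part of the Gabor function
#
#     g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
#
# with rotated coordinates x' = x*cos(theta) + y*sin(theta) and
# y' = -x*sin(theta) + y*cos(theta). A single kernel can be inspected directly;
# the parameter values here are arbitrary examples:
#
#   kernel = gabor_filter_kernel(ksize=21, sigma=8, theta=45, lambd=10, gamma=0.5, psi=0)
#   print(kernel.shape)  # (21, 21)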
| 657
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
    def test_convert_token_and_id( self ) -> None:
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 101122 )
    def test_vocab_size( self ) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
    @require_torch
    def test_prepare_batch( self ) -> None:
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors="pt" )
        self.assertIsInstance(batch , BatchEncoding )

        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
    def test_tokenizer_integration( self ) -> None:
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=sequences , )
| 657
| 1
|
from __future__ import annotations
def slowsort(sequence: list , start: int | None = None , end: int | None = None ) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence ) - 1

    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )

    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]

    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
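
    # A quick illustrative run (values chosen arbitrarily): slowsort sorts the
    # list in place.
    data = [5, 3, 8, 1, 9, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 5, 8, 9]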
| 711
|
def solution() -> int:
    """Returns the product of the 1st, 10th, 100th, ... 1000000th digits of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1

    constant = "".join(constant )

    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[99999] )
        * int(constant[999999] )
    )
if __name__ == "__main__":
print(solution())
| 678
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
    @require_torch
    def test_small_model_pt( self ):
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator('''This is a test''' , do_sample=False )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
        outputs = text_generator(['''This is a test''', '''This is a second test'''] )
        self.assertEqual(
            outputs , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
        outputs = text_generator('''This is a test''' , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {'''generated_token_ids''': ANY(list )},
                {'''generated_token_ids''': ANY(list )},
            ] , )

        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = '''<pad>'''
        outputs = text_generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {'''generated_token_ids''': ANY(list )},
                    {'''generated_token_ids''': ANY(list )},
                ],
                [
                    {'''generated_token_ids''': ANY(list )},
                    {'''generated_token_ids''': ANY(list )},
                ],
            ] , )
    @require_tf
    def test_small_model_tf( self ):
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )

        # Using `do_sample=False` to force deterministic output
        outputs = text_generator('''This is a test''' , do_sample=False )
        self.assertEqual(
            outputs , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
        outputs = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=False )
        self.assertEqual(
            outputs , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def get_test_pipeline( self , model , tokenizer , processor ):
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria( self ):
        prompt = '''Hello I believe in'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        output = text_generator(prompt )
        self.assertEqual(
            output , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )

        output = text_generator(prompt , stop_sequence=''' fe''' )
        self.assertEqual(output , [{'''generated_text''': '''Hello I believe in fe'''}] )
    def run_pipeline_test( self , text_generator , _ ):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator('''This is a test''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )

        outputs = text_generator('''This is a test''' , return_full_text=False )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )

        text_generator = pipeline(task='''text-generation''' , model=model , tokenizer=tokenizer , return_full_text=False )
        outputs = text_generator('''This is a test''' )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )

        outputs = text_generator('''This is a test''' , return_full_text=True )
        self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )

        outputs = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
            ] , )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=True )
            self.assertEqual(
                outputs , [
                    [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                    [{'''generated_text''': ANY(str )}, {'''generated_text''': ANY(str )}],
                ] , )

        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_full_text=True , return_text=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_full_text=True , return_tensors=True )
        with self.assertRaises(ValueError ):
            outputs = text_generator('''test''' , return_text=True , return_tensors=True )

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator('''''' )
            self.assertEqual(outputs , [{'''generated_text''': ANY(str )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                outputs = text_generator('''''' )

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('''This is a test''' * 500 , max_new_tokens=20 )

            outputs = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
            # Hole strategy cannot work with an overflowing max_new_tokens
            with self.assertRaises(ValueError ):
                text_generator(
                    '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_small_model_pt_bloom_accelerate( self ):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        out = pipe('''This is a test''' )
        self.assertEqual(
            out , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
        out = pipe('''This is a test''' )
        self.assertEqual(
            out , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
        out = pipe('''This is a test''' )
        self.assertEqual(
            out , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
    @require_torch
    @require_torch_gpu
    def test_small_model_fp16( self ):
        import torch

        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
        pipe('''This is a test''' )
    @require_torch
    @require_accelerate
    @require_torch_gpu
    def test_pipeline_accelerate_top_p( self ):
        import torch

        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
        pipe('''This is a test''' , do_sample=True , top_p=0.5 )
def a ( self : int ):
        prompt = '''Hello world'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            logger = logging.get_logger('''transformers.generation.utils''' )
        logger_msg = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out )
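

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the test suite; the tiny checkpoint and
    # prompt are assumptions): `handle_long_generation='hole'` keeps only the
    # rightmost part of an over-long prompt so that the prompt plus `max_new_tokens`
    # still fits inside the model context window.
    demo_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
    demo_out = demo_generator('''This is a test ''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
    print(demo_out[0]['''generated_text'''][-80:] )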
| 49
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor( ProcessorMixin ):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if text is not None:
            inputs = self.tokenizer(text , **kwargs )
        if audio is not None:
            audio_inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['''input_values'''] = audio_inputs['''input_values''']
            if "padding_mask" in audio_inputs:
                inputs['''padding_mask'''] = audio_inputs['''padding_mask''']
            return inputs
    def batch_decode( self , *args , **kwargs ):
        audio_values = kwargs.pop('''audio''' , None )
        padding_mask = kwargs.pop('''padding_mask''' , None )
        if len(args ) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values , padding_mask=padding_mask )
        else:
            return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def _decode_audio( self , audio_values , padding_mask: Optional = None ):
        audio_values = to_numpy(audio_values )
        bsz , channels , seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values )
        padding_mask = to_numpy(padding_mask )
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask , ((0, 0), (0, difference)) , '''constant''' , constant_values=padding_value )
        audio_values = audio_values.tolist()
        for i in range(bsz ):
            sliced_audio = np.asarray(audio_values[i] )[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels , -1 )
        return audio_values
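

# Brief usage sketch (illustrative, not part of this module): the processor pairs an
# EncodecFeatureExtractor with a T5 tokenizer. Because this file relies on relative
# imports, the example goes through the public `transformers` API, and the
# checkpoint name is an assumption:
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#     inputs = processor(text=["80s pop track with a heavy bassline"], padding=True, return_tensors="pt")
#     # After generation, trim padding from the decoded waveforms:
#     # audios = processor.batch_decode(audio=audio_values, padding_mask=inputs.get("padding_mask"))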
| 49
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="preactivation" , hidden_act="relu" , global_padding=None , num_groups=32 , drop_path_rate=0.0 , embedding_dynamic_padding=False , output_stride=32 , width_factor=1 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
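

# Illustrative usage (not part of this module; this file uses relative imports, so
# the sketch goes through the public `transformers` API, and the values below are
# assumptions rather than recommended settings):
#
#     from transformers import BitConfig
#
#     config = BitConfig(depths=[2, 2], hidden_sizes=[128, 256])
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2']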
| 719
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args , **kwargs ):
            pass
    def load_image(_ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests( unittest.TestCase ):
    '''simple docstring'''

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self , model , tokenizer , image_processor ):
        dqa_pipeline = pipeline(
            "document-question-answering" , model=model , tokenizer=tokenizer , image_processor=image_processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image ),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test( self , dqa_pipeline , examples ):
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"score": ANY(float ), "answer": ANY(str ), "start": ANY(int ), "end": ANY(int )},
                    {"score": ANY(float ), "answer": ANY(str ), "start": ANY(int ), "end": ANY(int )},
                ]
            ]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt( self ):
        dqa_pipeline = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt( self ):
        dqa_pipeline = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk( self ):
        dqa_pipeline = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk( self ):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
@slow
@require_torch
    def test_large_model_pt_donut( self ):
        dqa_pipeline = pipeline(
            "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf( self ):
pass
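

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the test suite): the checkpoint and
    # revision mirror the slow tests above; pytesseract is required for OCR when a
    # raw image URL is passed.
    demo_pipeline = pipeline(
        "document-question-answering" , model="impira/layoutlm-document-qa" , revision="3dc6de3" )
    print(demo_pipeline(image=INVOICE_URL , question="What is the invoice number?" , top_k=1 ) )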
| 698
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUpBlock2D(nn.Module ):
    """simple docstring"""

    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels , dtype=self.dtype )

    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states )
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module ):
    """simple docstring"""

    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions

    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
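

# A minimal sketch (not part of this module; shapes, channel counts, and the import
# path are assumptions) of initialising and applying one of the blocks above. The
# Flax UNet blocks take NHWC feature maps plus a time-embedding vector; the relative
# imports above prevent running this file directly, hence a comment:
#
#     import jax
#     import jax.numpy as jnp
#     from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D
#
#     block = FlaxDownBlock2D(in_channels=32, out_channels=64)
#     hidden_states = jnp.zeros((1, 8, 8, 32))   # NHWC feature map
#     temb = jnp.zeros((1, 128))                 # time embedding
#     params = block.init(jax.random.PRNGKey(0), hidden_states, temb)
#     out, skip_states = block.apply(params, hidden_states, temb)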
| 283
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))


def compare_versions(library_or_version: Union[str, Version] , operation: str , requirement_version: str ):
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}''' )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )


def is_torch_version(operation: str , version: str ):
    '''simple docstring'''
    return compare_versions(torch_version , operation , version )
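

# Brief usage sketch (illustrative; the relative import of STR_OPERATION_TO_FUNC
# means this module is intended to be imported as part of its package):
#
#     compare_versions("packaging", ">=", "20.0")  # compares the installed package version
#     is_torch_version(">=", "1.12")               # compares the installed torch version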
| 283
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swin-tiny-patch4-window7-224': (
        'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''

    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1E-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ['''stem'''] + [f"stage{idx}" for idx in range(1, len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class SwinOnnxConfig( OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-4
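

# Illustrative usage (values are assumptions; shown via the public `transformers`
# API since this file relies on relative imports):
#
#     from transformers import SwinConfig
#
#     config = SwinConfig(embed_dim=48, depths=[2, 2], num_heads=[3, 6])
#     print(config.hidden_size)   # 48 * 2 ** (len(depths) - 1) == 96
#     print(config.stage_names)   # ['stem', 'stage1', 'stage2']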
| 369
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict , is_panoptic=False ):
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = '''resnet101'''
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = '''panoptic''' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = '''huggingface/label-files'''
        filename = '''coco-detection-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    image_processor = ConditionalDetrImageProcessor(format=format )
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    logger.info(f"Converting model {model_name}..." )
    # load original model from torch hub
    conditional_detr = torch.hub.load('''DeppMeng/ConditionalDETR''' , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = '''conditional_detr.''' + src
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''conditional_detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''conditional_detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                val = state_dict.pop(key )
                state_dict['''conditional_detr.model''' + key[len('''conditional_detr''' ):]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['''conditional_detr.''' + key] = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config ) if is_panoptic else ConditionalDetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    model.push_to_hub(repo_id=model_name , organization='''DepuMeng''' , commit_message='''Add model''' )
    # verify our conversion
    original_outputs = conditional_detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
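
    # Example invocation (illustrative; the dump path is an assumption):
    #
    #   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
    #       --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./converted-conditional-detr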
| 369
| 1
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
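
# Illustrative usage (coordinates are approximate; the relative import of
# `haversine_distance` means this module must be imported as part of its package):
#
#     SAN_FRANCISCO = (37.774856, -122.424227)
#     YOSEMITE = (37.864742, -119.537521)
#     lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # distance in metres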
| 77
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1024}
class BartphoTokenizer( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , monolingual_vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token ) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token )] = cnt
                cnt += 1
        with open(monolingual_vocab_file , "r" , encoding="utf-8" ) as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids )
        if str(mask_token ) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token )] = len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )

    def get_vocab( self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
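

# Brief usage sketch (illustrative; requires network access to the Hugging Face Hub
# and goes through the public `transformers` API since this file uses relative imports):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#     ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#     tokenizer.decode(ids)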
| 453
| 0
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae( self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
    def test_safe_diffusion_ddim( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet , scheduler=scheduler , vae=vae , text_encoder=bert , tokenizer=tokenizer , safety_checker=None , feature_extractor=self.dummy_extractor , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ ( self : Tuple ):
__lowercase : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase : int = self.dummy_cond_unet
__lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_snake_case )
__lowercase : Tuple = self.dummy_vae
__lowercase : Optional[int] = self.dummy_text_encoder
__lowercase : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
__lowercase : Optional[int] = StableDiffusionPipeline(
unet=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , safety_checker=_snake_case , feature_extractor=self.dummy_extractor , )
__lowercase : Optional[Any] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
__lowercase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__lowercase : List[str] = torch.Generator(device=_snake_case ).manual_seed(0 )
__lowercase : Tuple = sd_pipe([prompt] , generator=_snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
__lowercase : Union[str, Any] = output.images
__lowercase : str = torch.Generator(device=_snake_case ).manual_seed(0 )
__lowercase : List[Any] = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=_snake_case , )[0]
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
__lowercase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowercase : Tuple = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self : Optional[int] ):
__lowercase : Optional[int] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=_snake_case )
assert isinstance(_snake_case , _snake_case )
assert isinstance(pipe.scheduler , _snake_case )
assert pipe.safety_checker is None
__lowercase : Dict = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case )
__lowercase : int = StableDiffusionPipeline.from_pretrained(_snake_case )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase : Union[str, Any] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def snake_case_ ( self : Tuple ):
__lowercase : Any = self.dummy_cond_unet
__lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_snake_case )
__lowercase : Union[str, Any] = self.dummy_vae
__lowercase : Union[str, Any] = self.dummy_text_encoder
__lowercase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
__lowercase : Union[str, Any] = unet.half()
__lowercase : Dict = vae.half()
__lowercase : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
__lowercase : Any = StableDiffusionPipeline(
unet=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , safety_checker=_snake_case , feature_extractor=self.dummy_extractor , )
__lowercase : Union[str, Any] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
__lowercase : Tuple = '''A painting of a squirrel eating a burger'''
__lowercase : str = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self : Optional[int] ):
__lowercase : List[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case )
__lowercase : int = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowercase : List[str] = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
__lowercase : int = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
__lowercase : Any = 40_0366_0346
__lowercase : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
__lowercase : int = torch.manual_seed(_snake_case )
__lowercase : Optional[Any] = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=_snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowercase : Dict = output.images
__lowercase : Optional[int] = image[0, -3:, -3:, -1]
__lowercase : Any = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
__lowercase : Tuple = torch.manual_seed(_snake_case )
__lowercase : Union[str, Any] = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=_snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowercase : Tuple = output.images
__lowercase : Optional[int] = image[0, -3:, -3:, -1]
__lowercase : int = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
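# The sld_* keyword arguments above configure safe latent diffusion: overall
# safety-guidance strength, the number of warmup steps before guidance kicks in,
# the edit threshold, and the momentum terms. Their exact semantics live in the
# safe-pipeline implementation, not in this test.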
def snake_case_ ( self : Tuple ):
__lowercase : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=_snake_case )
__lowercase : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowercase : Any = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
__lowercase : Optional[int] = '''padme amidala taking a bath artwork, safe for work, no nudity'''
__lowercase : Union[str, Any] = 27_3497_1755
__lowercase : str = 7
__lowercase : Optional[Any] = torch.manual_seed(_snake_case )
__lowercase : Tuple = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=_snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowercase : Dict = output.images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
__lowercase : Union[str, Any] = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__lowercase : List[Any] = torch.manual_seed(_snake_case )
__lowercase : str = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=_snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowercase : List[str] = output.images
__lowercase : Any = image[0, -3:, -3:, -1]
__lowercase : Optional[Any] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self : Union[str, Any] ):
__lowercase : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
__lowercase : Dict = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
__lowercase : Any = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
__lowercase : Optional[Any] = 10_4435_5234
__lowercase : Tuple = 12
__lowercase : List[Any] = torch.manual_seed(_snake_case )
__lowercase : Any = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=_snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
__lowercase : Tuple = output.images
__lowercase : List[str] = image[0, -3:, -3:, -1]
__lowercase : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__lowercase : Tuple = torch.manual_seed(_snake_case )
__lowercase : Union[str, Any] = sd_pipe(
[prompt] , generator=_snake_case , guidance_scale=_snake_case , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__lowercase : Union[str, Any] = output.images
__lowercase : Optional[Any] = image[0, -3:, -3:, -1]
__lowercase : Dict = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 284
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args , **kwargs ):
            pass

    # Fallback stub for `load_image`, used only when vision deps are unavailable.
    def load_image(_ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
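
def _example_docqa_query():
    # A minimal usage sketch added for illustration (not part of the test
    # suite): query the pinned invoice with the LayoutLM checkpoint that the
    # slow tests below exercise.
    dqa = pipeline("document-question-answering" , model="impira/layoutlm-document-qa" )
    return dqa(image=INVOICE_URL , question="What is the invoice number?" , top_k=1 )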
@is_pipeline_test
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def snake_case_ ( self : List[Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Any ):
__lowercase : List[str] = pipeline(
'''document-question-answering''' , model=_snake_case , tokenizer=_snake_case , image_processor=_snake_case )
__lowercase : int = INVOICE_URL
__lowercase : Optional[int] = list(zip(*apply_tesseract(load_image(_snake_case ) , _snake_case , '''''' ) ) )
__lowercase : List[str] = '''What is the placebo?'''
__lowercase : Tuple = [
{
'''image''': load_image(_snake_case ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def snake_case_ ( self : List[str] , _snake_case : int , _snake_case : List[Any] ):
__lowercase : Optional[Any] = dqa_pipeline(_snake_case , top_k=2 )
self.assertEqual(
_snake_case , [
[
{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case ), '''start''': ANY(_snake_case ), '''end''': ANY(_snake_case )},
{'''score''': ANY(_snake_case ), '''answer''': ANY(_snake_case ), '''start''': ANY(_snake_case ), '''end''': ANY(_snake_case )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def snake_case_ ( self : Optional[int] ):
__lowercase : List[str] = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__lowercase : Any = INVOICE_URL
__lowercase : str = '''How many cats are there?'''
__lowercase : Optional[int] = [
{'''score''': 0.00_01, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.00_01, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__lowercase : Optional[Any] = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(nested_simplify(_snake_case , decimals=4 ) , _snake_case )
__lowercase : Optional[Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(_snake_case , decimals=4 ) , _snake_case )
# No text is detected in this image, so layoutlmv2 should return
# an empty answer list.
__lowercase : Optional[int] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowercase : str = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(_snake_case , [] )
# We can optionally pass the words and bounding boxes directly
__lowercase : List[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__lowercase : Dict = []
__lowercase : List[str] = []
__lowercase : Tuple = dqa_pipeline(image=_snake_case , question=_snake_case , words=_snake_case , boxes=_snake_case , top_k=2 )
self.assertEqual(_snake_case , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def snake_case_ ( self : int ):
__lowercase : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__lowercase : List[str] = INVOICE_URL
__lowercase : Optional[Any] = '''What is the invoice number?'''
__lowercase : int = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : List[str] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'''score''': 0.99_44, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.00_09, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def snake_case_ ( self : int ):
__lowercase : Optional[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__lowercase : Union[str, Any] = INVOICE_URL
__lowercase : Any = '''What is the invoice number?'''
__lowercase : Any = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'''score''': 0.99_74, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.99_48, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case_ ( self : str ):
__lowercase : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_snake_case )
__lowercase : Union[str, Any] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_snake_case , revision='''3dc6de3''' , )
__lowercase : Tuple = INVOICE_URL
__lowercase : Any = '''What is the invoice number?'''
__lowercase : Optional[Any] = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowercase : List[Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__lowercase : int = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__lowercase : Union[str, Any] = list(zip(*apply_tesseract(load_image(_snake_case ) , _snake_case , '''''' ) ) )
# This model should also work if `image` is set to None
__lowercase : Union[str, Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.42_51, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.08_19, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def snake_case_ ( self : List[str] ):
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=_snake_case )
__lowercase : List[str] = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=_snake_case , revision='''3dc6de3''' , max_seq_len=50 , )
__lowercase : Tuple = INVOICE_URL
__lowercase : Optional[int] = '''What is the invoice number?'''
__lowercase : Any = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__lowercase : Any = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
[
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__lowercase : int = list(zip(*apply_tesseract(load_image(_snake_case ) , _snake_case , '''''' ) ) )
# This model should also work if `image` is set to None
__lowercase : Tuple = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(_snake_case , decimals=4 ) , [
{'''score''': 0.99_99, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.99_98, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def snake_case_ ( self : List[Any] ):
__lowercase : int = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__lowercase : List[Any] = INVOICE_URL
__lowercase : List[Any] = '''What is the invoice number?'''
__lowercase : int = dqa_pipeline(image=_snake_case , question=_snake_case , top_k=2 )
self.assertEqual(nested_simplify(_snake_case , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def snake_case_ ( self : int ):
pass
| 284
| 1
|
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
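# For example (assuming a Python >= 3.10 interpreter), the two annotations
# below are equivalent ways to declare an optional integer field:
#
#     foo: int | None = None        # PEP 604 union syntax
#     foo: Optional[int] = None     # classic typing.Optional syntax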
def list_field(default=None , metadata=None ):
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
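# Why the factory: dataclasses reject mutable defaults such as `field(default=[])`,
# so `list_field(default=[1, 2])` wraps the value as
# `field(default_factory=lambda: [1, 2], metadata=...)`, giving each instance
# its own fresh list.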
@dataclass
class __lowercase :
"""simple docstring"""
_A : int
_A : float
_A : str
_A : bool
@dataclass
class __lowercase :
"""simple docstring"""
_A : int = 42
_A : str = field(default="""toto""" , metadata={"""help""": """help message"""})
@dataclass
class __lowercase :
"""simple docstring"""
_A : bool = False
_A : bool = True
_A : Optional[bool] = None
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : Optional[Any] = """titi"""
_A : Dict = """toto"""
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = """titi"""
_A : Optional[Any] = """toto"""
_A : Optional[Any] = 42
@dataclass
class __lowercase :
"""simple docstring"""
_A : BasicEnum = "toto"
def __UpperCamelCase (self ):
snake_case_ : str = BasicEnum(self.foo )
@dataclass
class __lowercase :
"""simple docstring"""
_A : MixedTypeEnum = "toto"
def __UpperCamelCase (self ):
snake_case_ : Optional[Any] = MixedTypeEnum(self.foo )
@dataclass
class __lowercase :
"""simple docstring"""
_A : Optional[int] = None
_A : Optional[float] = field(default=_UpperCAmelCase , metadata={"""help""": """help message"""})
_A : Optional[str] = None
_A : Optional[List[str]] = list_field(default=[])
_A : Optional[List[int]] = list_field(default=[])
@dataclass
class __lowercase :
"""simple docstring"""
_A : List[int] = list_field(default=[])
_A : List[int] = list_field(default=[1, 2, 3])
_A : List[str] = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
_A : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class __lowercase :
"""simple docstring"""
_A : List[int] = field()
_A : str = field()
_A : BasicEnum = field()
def __UpperCamelCase (self ):
snake_case_ : Dict = BasicEnum(self.required_enum )
@dataclass
class __lowercase :
"""simple docstring"""
_A : int
_A : "BasicEnum" = field()
_A : "Optional[bool]" = None
_A : "str" = field(default="""toto""" , metadata={"""help""": """help message"""})
_A : "List[str]" = list_field(default=["""Hallo""", """Bonjour""", """Hello"""])
if is_python_no_less_than_3_10:
@dataclass
class __lowercase :
"""simple docstring"""
_A : bool = False
_A : bool = True
_A : bool | None = None
@dataclass
class __lowercase :
"""simple docstring"""
_A : int | None = None
_A : float | None = field(default=_UpperCAmelCase , metadata={"""help""": """help message"""})
_A : str | None = None
_A : list[str] | None = list_field(default=[])
_A : list[int] | None = list_field(default=[])
class __lowercase ( unittest.TestCase):
"""simple docstring"""
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
snake_case_ : str = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""}
snake_case_ : Dict = {k: v for k, v in vars(lowercase__ ).items() if k != """container"""}
# Choices with mixed types use a custom function as their "type",
# so we need to compare the parsed results directly for equality
if xx.get("""choices""" , lowercase__ ) and yy.get("""choices""" , lowercase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](lowercase__ ) , yy["""type"""](lowercase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = HfArgumentParser(lowercase__ )
snake_case_ : Dict = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--bar""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--baz""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--flag""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Dict = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((snake_case_) , ) : str = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def __UpperCamelCase (self ):
snake_case_ : Tuple = HfArgumentParser(lowercase__ )
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=lowercase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase__ , help="""help message""" )
self.argparsersEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
expected.add_argument("""--baz""" , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counterpart
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=lowercase__ , dest="""baz""" )
expected.add_argument("""--opt""" , type=lowercase__ , default=lowercase__ )
snake_case_ : int = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
snake_case_ : Optional[int] = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Tuple = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Any = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Dict = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Dict = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
snake_case_ : Any = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def __UpperCamelCase (self ):
snake_case_ : Dict = HfArgumentParser(lowercase__ )
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[int] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ : Union[str, Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
snake_case_ : Union[str, Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ : Optional[Any] = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
snake_case_ : Optional[int] = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
snake_case_ : int = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __UpperCamelCase (self ):
@dataclass
class __lowercase :
"""simple docstring"""
_A : Literal["titi", "toto", 42] = "toto"
snake_case_ : Optional[Any] = HfArgumentParser(lowercase__ )
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
snake_case_ : List[Any] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
snake_case_ : str = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
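# Note: `Literal["titi", "toto", 42]` is surfaced to argparse exactly like the
# Enum case above -- the literal values become `choices`, and
# `make_choice_type_function` converts the raw string back to the right type.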
def __UpperCamelCase (self ):
snake_case_ : str = HfArgumentParser(lowercase__ )
snake_case_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=lowercase__ )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase__ )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : Optional[int] = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
snake_case_ : Dict = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def __UpperCamelCase (self ):
snake_case_ : int = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=lowercase__ , type=lowercase__ )
expected.add_argument("""--bar""" , default=lowercase__ , type=lowercase__ , help="""help message""" )
expected.add_argument("""--baz""" , default=lowercase__ , type=lowercase__ )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=lowercase__ )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=lowercase__ )
snake_case_ : Union[str, Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
snake_case_ : Dict = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
snake_case_ : int = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
snake_case_ : List[Any] = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = HfArgumentParser(lowercase__ )
snake_case_ : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=lowercase__ , required=lowercase__ )
expected.add_argument("""--required_str""" , type=lowercase__ , required=lowercase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = HfArgumentParser(lowercase__ )
snake_case_ : Optional[int] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=lowercase__ , required=lowercase__ )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=lowercase__ , )
expected.add_argument("""--opt""" , type=lowercase__ , default=lowercase__ )
expected.add_argument("""--baz""" , default="""toto""" , type=lowercase__ , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = HfArgumentParser(lowercase__ )
snake_case_ : int = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
snake_case_ : str = parser.parse_dict(lowercase__ )[0]
snake_case_ : List[Any] = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = HfArgumentParser(lowercase__ )
snake_case_ : Optional[Any] = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[str] = HfArgumentParser(lowercase__ )
snake_case_ : Any = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Dict = os.path.join(lowercase__ , """temp_json""" )
os.mkdir(lowercase__ )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(lowercase__ , lowercase__ )
snake_case_ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0]
snake_case_ : Optional[int] = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = HfArgumentParser(lowercase__ )
snake_case_ : Dict = {
"""foo""": 12,
"""bar""": 3.14,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : List[str] = os.path.join(lowercase__ , """temp_yaml""" )
os.mkdir(lowercase__ )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(lowercase__ , lowercase__ )
snake_case_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
snake_case_ : Union[str, Any] = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 480
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20 ) -> int:
    """simple docstring"""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
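
def solution_comb(n: int = 20 ) -> int:
    """Equivalent sketch using math.comb (Python 3.8+), which avoids the large
    intermediate factorials. `solution_comb` is a hypothetical helper added for
    illustration, not part of the original file."""
    from math import comb

    return comb(2 * n , n )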
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number.''')
| 480
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline( DiffusionPipeline ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__( self , unet: UNetaDModel , scheduler: ScoreSdeVeScheduler ):
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 2000 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs , ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
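
def _example_sde_ve_sampling():
    # A minimal usage sketch added for illustration (the checkpoint name is an
    # assumption, not something this file asserts): run the predictor-corrector
    # sampler above end to end and return one PIL image.
    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256" )
    return pipe(num_inference_steps=2000 ).images[0]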
| 143
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __lowercase :
def __init__( self : int , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any]=1_3 , lowercase__ : int=7 , lowercase__ : Dict=True , lowercase__ : List[Any]=True , lowercase__ : int=True , lowercase__ : Optional[Any]=True , lowercase__ : Union[str, Any]=9_9 , lowercase__ : Any=6_4 , lowercase__ : int=3_2 , lowercase__ : str=5 , lowercase__ : List[str]=4 , lowercase__ : str=3_7 , lowercase__ : Tuple="gelu" , lowercase__ : Any=0.1 , lowercase__ : Any=0.1 , lowercase__ : Dict=5_1_2 , lowercase__ : List[Any]=1_6 , lowercase__ : List[str]=2 , lowercase__ : Dict=0.02 , lowercase__ : List[Any]=3 , lowercase__ : List[Any]=4 , lowercase__ : Optional[int]=None , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = embedding_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def __magic_name__ ( self : List[Any] ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ = ids_tensor([self.batch_size] , self.num_choices )
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __magic_name__ ( self : Optional[Any] ):
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def __magic_name__ ( self : List[str] , lowercase__ : List[Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : str ):
a_ = MegatronBertModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ )
a_ = model(lowercase__ , token_type_ids=lowercase__ )
a_ = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Dict , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : int ):
a_ = MegatronBertForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : Dict , lowercase__ : Any , lowercase__ : List[str] ):
a_ = MegatronBertForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Union[str, Any] , lowercase__ : str , lowercase__ : int , lowercase__ : str , lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] ):
a_ = MegatronBertForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : Tuple , lowercase__ : str , lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : int , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : int ):
a_ = MegatronBertForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , next_sentence_label=lowercase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __magic_name__ ( self : int , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : int , lowercase__ : List[Any] ):
a_ = MegatronBertForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : str , lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : str , lowercase__ : Tuple , lowercase__ : str , lowercase__ : List[str] ):
a_ = self.num_labels
a_ = MegatronBertForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[str] , lowercase__ : Dict , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ):
a_ = self.num_labels
a_ = MegatronBertForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : str , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[Any] , lowercase__ : Union[str, Any] ):
a_ = self.num_choices
a_ = MegatronBertForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
a_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a_ = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __magic_name__ ( self : List[Any] ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( a__ , a__ , unittest.TestCase ):
_lowerCAmelCase = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
# test_resize_embeddings = False
_lowerCAmelCase = False
def __magic_name__ ( self : List[Any] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Any=False ):
a_ = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
a_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowercase__ )
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def __magic_name__ ( self : Any ):
a_ = MegatronBertModelTester(self )
a_ = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7 )
def __magic_name__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def __magic_name__ ( self : Dict ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowercase__ )
def __magic_name__ ( self : List[str] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowercase__ )
def __magic_name__ ( self : Optional[int] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowercase__ )
def __magic_name__ ( self : Union[str, Any] ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowercase__ )
def __magic_name__ ( self : Tuple ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowercase__ )
def __magic_name__ ( self : int ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowercase__ )
def _long_tensor(tok_lst ):
    """simple docstring"""
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
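# For example, _long_tensor([[101, 7110, 102]]) yields a (1, 3) int64 tensor on
# the test device -- the shape and dtype the model's forward pass expects for input_ids.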
UpperCamelCase__ = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def __magic_name__ ( self : Union[str, Any] ):
a_ = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
a_ = os.path.join(os.environ['''MYDIR'''] , lowercase__ )
a_ = MegatronBertModel.from_pretrained(lowercase__ )
model.to(lowercase__ )
model.half()
a_ = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
a_ = model(lowercase__ )[0]
a_ = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , lowercase__ )
a_ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
a_ = output[0, ii, jj]
a_ = expected[3 * ii + jj]
a_ = '''ii={} jj={} a={} b={}'''.format(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(math.isclose(lowercase__ , lowercase__ , rel_tol=lowercase__ , abs_tol=lowercase__ ) , msg=lowercase__ )
| 143
| 1
|
def lowercase_ ( x_points , y_points , xa ):
    '''simple docstring'''
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
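
# A minimal usage sketch (added for illustration, not part of the original
# module): for points on the line y = 2x, Neville's scheme recovers the exact
# interpolated value at the query point xa.
if __name__ == "__main__":
    interpolated , _table = lowercase_([1, 2, 3, 4] , [2, 4, 6, 8] , 2.5 )
    assert interpolated == 5.0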
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
lowercase : Dict = KandinskyInpaintPipeline
lowercase : Any = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowercase : Any = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowercase : Any = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowercase : Dict = False
@property
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
return 32
@property
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return 1_00
@property
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase : Any = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__UpperCamelCase : Optional[int] = MultilingualCLIP(__UpperCamelCase )
__UpperCamelCase : int = text_encoder.eval()
return text_encoder
@property
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase : List[Any] = {
"in_channels": 9,
# out_channels is double in_channels because the model predicts both the mean and the variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__UpperCamelCase : List[Any] = UNetaDConditionModel(**__UpperCamelCase )
return model
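# A note on the kwargs above (an inference from the channel counts, not
# asserted by this file): in_channels=9 matches inpainting conditioning with
# 4 latent channels + 4 masked-image latent channels + 1 mask channel.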
@property
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : Any = self.dummy_tokenizer
__UpperCamelCase : Tuple = self.dummy_unet
__UpperCamelCase : List[str] = self.dummy_movq
__UpperCamelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCamelCase , )
__UpperCamelCase : Dict = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=0 ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
__UpperCamelCase : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCamelCase )
# create init_image
__UpperCamelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
__UpperCamelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase : Optional[Any] = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
__UpperCamelCase : Dict = np.ones((64, 64) , dtype=np.floataa )
__UpperCamelCase : List[Any] = 0
if str(__UpperCamelCase ).startswith("mps" ):
__UpperCamelCase : int = torch.manual_seed(__UpperCamelCase )
else:
__UpperCamelCase : Union[str, Any] = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
__UpperCamelCase : Any = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Any = "cpu"
__UpperCamelCase : Any = self.get_dummy_components()
__UpperCamelCase : Optional[Any] = self.pipeline_class(**__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
__UpperCamelCase : int = pipe(**self.get_dummy_inputs(__UpperCamelCase ) )
__UpperCamelCase : str = output.images
__UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(__UpperCamelCase ) , return_dict=__UpperCamelCase , )[0]
__UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
__UpperCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : Tuple = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCamelCase ( self ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
__UpperCamelCase : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__UpperCamelCase : str = np.ones((7_68, 7_68) , dtype=np.float32 )
__UpperCamelCase : Dict = 0
__UpperCamelCase : Union[str, Any] = "a hat"
__UpperCamelCase : Any = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCamelCase )
__UpperCamelCase : Any = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
__UpperCamelCase : Any = pipeline.to(__UpperCamelCase )
pipeline.set_progress_bar_config(disable=__UpperCamelCase )
__UpperCamelCase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase = pipe_prior(
__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__UpperCamelCase : List[Any] = pipeline(
__UpperCamelCase , image=__UpperCamelCase , mask_image=__UpperCamelCase , image_embeds=__UpperCamelCase , negative_image_embeds=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
__UpperCamelCase : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__UpperCamelCase , __UpperCamelCase )
| 327
| 0
|
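The inpainting test above wires dummy components into the Kandinsky 2.1 stack and then, in the slow test, runs the published checkpoints end to end. A minimal sketch of that end-to-end flow follows, using only names that appear in the test; the mask region is a hypothetical choice, and the mask convention (ones everywhere, zeros where content should be repainted) is taken from the test rather than verified against current diffusers defaults.
import numpy as np
import torch
from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
prompt = "a hat"
image_embeds, negative_image_embeds = pipe_prior(prompt, negative_prompt="").to_tuple()

# Mask convention as in the test above: ones everywhere, zeros mark the
# region to repaint. The slice below is a hypothetical choice.
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0

image = pipe(
    prompt,
    image=init_image,
    mask_image=mask,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
    num_inference_steps=100,
).images[0]
image.save("cat_with_hat.png")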
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
_lowerCamelCase = KandinskyImg2ImgPipeline
_lowerCamelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
_lowerCamelCase = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_lowerCamelCase = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowerCamelCase = False
@property
def lowerCAmelCase__ ( self ):
return 32
@property
def lowerCAmelCase__ ( self ):
return 32
@property
def lowerCAmelCase__ ( self ):
return self.time_input_dim
@property
def lowerCAmelCase__ ( self ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self ):
return 100
@property
def lowerCAmelCase__ ( self ):
__magic_name__ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowerCAmelCase__ ( self ):
torch.manual_seed(0 )
__magic_name__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__magic_name__ = MultilingualCLIP(UpperCamelCase_ )
__magic_name__ = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase__ ( self ):
torch.manual_seed(0 )
__magic_name__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__magic_name__ = UNet2DConditionModel(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self ):
torch.manual_seed(0 )
__magic_name__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self ):
__magic_name__ = self.dummy_text_encoder
__magic_name__ = self.dummy_tokenizer
__magic_name__ = self.dummy_unet
__magic_name__ = self.dummy_movq
__magic_name__ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__magic_name__ = DDIMScheduler(**UpperCamelCase_ )
__magic_name__ = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
__magic_name__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__magic_name__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase_ )
# create init_image
__magic_name__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__magic_name__ = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) )
if str(UpperCamelCase_ ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(UpperCamelCase_ )
else:
__magic_name__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__magic_name__ = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase__ ( self ):
__magic_name__ = '''cpu'''
__magic_name__ = self.get_dummy_components()
__magic_name__ = self.pipeline_class(**UpperCamelCase_ )
__magic_name__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__magic_name__ = output.images
__magic_name__ = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
__magic_name__ = image[0, -3:, -3:, -1]
__magic_name__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def lowerCAmelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ):
__magic_name__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
__magic_name__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__magic_name__ = '''A red cartoon frog, 4k'''
__magic_name__ = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(UpperCamelCase_ )
__magic_name__ = KandinskyImg2ImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
__magic_name__ = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
__magic_name__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
__magic_name__ , __magic_name__ = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__magic_name__ = pipeline(
UpperCamelCase_ , image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
__magic_name__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 190
|
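For the img2img variant exercised above, the decisive knob is strength: it sets how far along the noise schedule the init image is pushed before denoising, so low values stay close to the input. A sketch of the slow-test flow, under the assumption that the class corresponds to diffusers' KandinskyImg2ImgPipeline and the Kandinsky 2.1 checkpoints:
import torch
from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyImg2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
)
prompt = "A red cartoon frog, 4k"
image_embeds, negative_image_embeds = pipe_prior(prompt, negative_prompt="").to_tuple()

# strength=0.2, as in the test, keeps most of the init image; higher values repaint more.
image = pipe(
    prompt,
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    strength=0.2,
    num_inference_steps=100,
    height=768,
    width=768,
).images[0]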
"""simple docstring"""
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)
def kth_number(lst, k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 190
| 1
|
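One caveat in the quickselect above: elements equal to the pivot land in neither partition, so duplicates can be silently dropped. A minimal sketch that keeps an explicit equal bucket and is therefore safe with repeated values:
from random import choice

def kth_smallest(lst: list, k: int) -> int:
    """Return the k-th smallest element, 1-indexed; expected O(n) time."""
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    equal = [e for e in lst if e == pivot]
    big = [e for e in lst if e > pivot]
    if k <= len(small):
        return kth_smallest(small, k)
    if k <= len(small) + len(equal):
        return pivot
    return kth_smallest(big, k - len(small) - len(equal))

assert kth_smallest([3, 1, 4, 1, 5, 9, 2, 6], 3) == 2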
'''simple docstring'''
def A ( numa : int , numb : int ) -> bool:
    '''simple docstring'''
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48
|
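The one-liner above leans on two's complement: the sign bit of numa ^ numb is the XOR of the two sign bits, so the result is negative exactly when the signs differ. A few checks as a sketch (note that zero counts as non-negative):
def different_signs(a: int, b: int) -> bool:
    # Python ints behave like infinite two's-complement bit strings, so the
    # sign of a ^ b is the XOR of the two operands' sign bits.
    return (a ^ b) < 0

assert different_signs(3, -7) is True
assert different_signs(-3, -7) is False
assert different_signs(3, 7) is False
assert different_signs(0, -1) is True  # zero is treated as non-negative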
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = """import __main__ as z"""
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("""z.get_set_bits_count_using_modulo_operator(25)""", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            """z.get_set_bits_count_using_brian_kernighans_algorithm(25)""", setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
| 213
| 0
|
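The reason Brian Kernighan's loop runs once per set bit: subtracting 1 flips the lowest set bit and every zero below it, so number &= number - 1 clears exactly one set bit per iteration. A short trace:
n = 0b10110  # three set bits
trace = []
while n:
    n &= n - 1  # clears exactly the lowest set bit
    trace.append(bin(n))
# 0b10110 -> 0b10100 -> 0b10000 -> 0b0 : three iterations, three set bits
assert trace == ["0b10100", "0b10000", "0b0"]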
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self :List[Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=7 , _lowercase :Tuple=3 , _lowercase :Union[str, Any]=18 , _lowercase :Union[str, Any]=30 , _lowercase :str=400 , _lowercase :Optional[int]=True , _lowercase :List[Any]=None , _lowercase :List[str]=True , _lowercase :Tuple=False , _lowercase :Optional[int]=True , _lowercase :List[str]=True , _lowercase :Optional[Any]=[0.5, 0.5, 0.5] , _lowercase :int=[0.5, 0.5, 0.5] , ) -> List[Any]:
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size if size is not None else {'''height''': 18, '''width''': 20}
UpperCAmelCase_ = do_thumbnail
UpperCAmelCase_ = do_align_axis
UpperCAmelCase_ = do_pad
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
def __a ( self :Any) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class a_ ( _snake_case , unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] =DonutImageProcessor if is_vision_available() else None
def __a ( self :List[str]) -> Union[str, Any]:
UpperCAmelCase_ = DonutImageProcessingTester(self)
@property
def __a ( self :Tuple) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :List[str]) -> Optional[int]:
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_lowercase , '''do_resize'''))
self.assertTrue(hasattr(_lowercase , '''size'''))
self.assertTrue(hasattr(_lowercase , '''do_thumbnail'''))
self.assertTrue(hasattr(_lowercase , '''do_align_long_axis'''))
self.assertTrue(hasattr(_lowercase , '''do_pad'''))
self.assertTrue(hasattr(_lowercase , '''do_normalize'''))
self.assertTrue(hasattr(_lowercase , '''image_mean'''))
self.assertTrue(hasattr(_lowercase , '''image_std'''))
def __a ( self :Tuple) -> int:
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20})
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
# Previous config had dimensions in (width, height) order
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84))
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42})
def __a ( self :int) -> str:
pass
@is_flaky()
def __a ( self :Union[str, Any]) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __a ( self :Any) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __a ( self :Optional[int]) -> Dict:
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase)
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor)
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 720
|
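A hedged usage sketch for the processor under test: the size dict mirrors the tester above, and the exact output shape depends on the processor's pad/thumbnail defaults, so treat the printed expectation as an assumption rather than a guarantee.
import numpy as np
from transformers import DonutImageProcessor

processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8)  # HWC uint8 input
pixel_values = processor(image, return_tensors="pt").pixel_values
# Batched output is (batch, channels, height, width), matching the assertions above.
print(pixel_values.shape)  # expected torch.Size([1, 3, 18, 20]) under default settings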
import re
def A ( dna : str ) -> str:
    '''simple docstring'''
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 561
| 0
|
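The translate/maketrans idiom above extends naturally to the reverse complement, which is the quantity usually wanted in sequence analysis; building the table once with str.maketrans keeps the per-call work to a single translate and slice:
COMPLEMENT = str.maketrans("ATCG", "TAGC")  # built once, reused per call

def reverse_complement(dna: str) -> str:
    """
    >>> reverse_complement("ATCG")
    'CGAT'
    """
    return dna.translate(COMPLEMENT)[::-1]

if __name__ == "__main__":
    import doctest
    doctest.testmod()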
def naive_pattern_search(s: str, pattern: str) -> list:
    """simple docstring"""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 14
|
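The same O(len(s) * len(pattern)) scan can be written with slicing, which also makes the overlapping-match behavior of naive_pattern_search explicit:
def naive_search(s: str, pattern: str) -> list:
    m = len(pattern)
    # Compare a length-m window at every start position; slices short-circuit
    # on the first mismatching character, so the complexity is unchanged.
    return [i for i in range(len(s) - m + 1) if s[i : i + m] == pattern]

assert naive_search("ABCDEFG", "DE") == [3]
assert naive_search("AAAA", "AA") == [0, 1, 2]  # overlapping matches are kept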
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase : List[Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668
| 0
|
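The __init__.py above is an instance of transformers' lazy-import pattern: _import_structure maps submodules to their public names, type checkers see real imports under TYPE_CHECKING, and at runtime the module object is swapped for a _LazyModule. A minimal sketch of the underlying idea follows; it is an illustration only, not transformers' actual implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    """Illustrative lazy module: submodules are imported on first attribute access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every public attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module
            for module, attrs in import_structure.items()
            for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # The expensive import happens only on first access ...
        submodule = importlib.import_module(
            f"{self.__name__}.{self._attr_to_module[attr]}"
        )
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # ... and the result is cached for later lookups.
        return value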
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = 'Hello world! cécé herlolip'
def lowerCamelCase__ ( A__ : str , A__ : str , A__ : bool ):
'''simple docstring'''
__lowerCamelCase = FairseqRobertaModel.from_pretrained(A__ )
roberta.eval() # disable dropout
__lowerCamelCase = roberta.model.encoder.sentence_encoder
__lowerCamelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__lowerCamelCase = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , A__ )
__lowerCamelCase = XLMRobertaXLForSequenceClassification(A__ ) if classification_head else XLMRobertaXLForMaskedLM(A__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCamelCase = roberta_sent_encoder.embed_tokens.weight
__lowerCamelCase = roberta_sent_encoder.embed_positions.weight
__lowerCamelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__lowerCamelCase = roberta_sent_encoder.layer_norm.weight
__lowerCamelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCamelCase = model.roberta.encoder.layer[i]
__lowerCamelCase = roberta_sent_encoder.layers[i]
__lowerCamelCase = layer.attention
__lowerCamelCase = roberta_layer.self_attn_layer_norm.weight
__lowerCamelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
__lowerCamelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__lowerCamelCase = roberta_layer.self_attn.q_proj.weight
__lowerCamelCase = roberta_layer.self_attn.q_proj.bias
__lowerCamelCase = roberta_layer.self_attn.k_proj.weight
__lowerCamelCase = roberta_layer.self_attn.k_proj.bias
__lowerCamelCase = roberta_layer.self_attn.v_proj.weight
__lowerCamelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
__lowerCamelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__lowerCamelCase = roberta_layer.self_attn.out_proj.weight
__lowerCamelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__lowerCamelCase = roberta_layer.final_layer_norm.weight
__lowerCamelCase = roberta_layer.final_layer_norm.bias
# intermediate
__lowerCamelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
__lowerCamelCase = roberta_layer.fc1.weight
__lowerCamelCase = roberta_layer.fc1.bias
# output
__lowerCamelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
__lowerCamelCase = roberta_layer.fc2.weight
__lowerCamelCase = roberta_layer.fc2.bias
# end of layer
if classification_head:
__lowerCamelCase = roberta.model.classification_heads["""mnli"""].dense.weight
__lowerCamelCase = roberta.model.classification_heads["""mnli"""].dense.bias
__lowerCamelCase = roberta.model.classification_heads["""mnli"""].out_proj.weight
__lowerCamelCase = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__lowerCamelCase = roberta.model.encoder.lm_head.dense.weight
__lowerCamelCase = roberta.model.encoder.lm_head.dense.bias
__lowerCamelCase = roberta.model.encoder.lm_head.layer_norm.weight
__lowerCamelCase = roberta.model.encoder.lm_head.layer_norm.bias
__lowerCamelCase = roberta.model.encoder.lm_head.weight
__lowerCamelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCamelCase = roberta.encode(A__ ).unsqueeze(0 ) # batch of size 1
__lowerCamelCase = model(A__ )[0]
if classification_head:
__lowerCamelCase = roberta.model.classification_heads["""mnli"""](roberta.extract_features(A__ ) )
else:
__lowerCamelCase = roberta.model(A__ )[0]
print(our_output.shape , their_output.shape )
__lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
__lowerCamelCase = torch.allclose(A__ , A__ , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(A__ ).mkdir(parents=A__ , exist_ok=A__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase_ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 714
|
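Both this conversion script and the BART one below end with the same sanity check: feed one input through the original and the converted model and require near-identical outputs. The kernel of that check, in isolation:
import torch

def outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> bool:
    max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_abs_diff}")  # typically ~1e-7 on a clean conversion
    return torch.allclose(ours, theirs, atol=atol)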
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase_ = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
UpperCAmelCase_ = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = ' Hello world! cécé herlolip'
UpperCAmelCase_ = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase__ ( A__ : Tuple , A__ : Any , A__ : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase = dct.pop(A__ )
__lowerCamelCase = val
def lowerCamelCase__ ( A__ : Tuple ):
'''simple docstring'''
__lowerCamelCase = torch.load(A__ , map_location="""cpu""" )
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , """bart.large.cnn""" ).eval()
hub_interface.model.load_state_dict(sd["""model"""] )
return hub_interface
def lowerCamelCase__ ( A__ : List[Any] ):
'''simple docstring'''
__lowerCamelCase, __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(emb.weight.shape[1] , emb.weight.shape[0] , bias=False )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase__ ( A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict=None ):
'''simple docstring'''
if not os.path.exists(A__ ):
__lowerCamelCase = torch.hub.load("""pytorch/fairseq""" , A__ ).eval()
else:
__lowerCamelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__lowerCamelCase = checkpoint_path.replace(""".""" , """-""" )
__lowerCamelCase = BartConfig.from_pretrained(A__ )
__lowerCamelCase = bart.encode(A__ ).unsqueeze(0 )
__lowerCamelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors="""pt""" ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )
if checkpoint_path == "bart.large.mnli":
__lowerCamelCase = bart.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""model.decoder.embed_tokens.weight"""]
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
__lowerCamelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = bart.predict("""mnli""" , A__ , return_logits=A__ )
__lowerCamelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
__lowerCamelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
__lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCamelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
__lowerCamelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
__lowerCamelCase = model(A__ ).model[0]
else:
__lowerCamelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , """lm_head""" ):
__lowerCamelCase = make_linear_from_emb(model.model.shared )
__lowerCamelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError("""Some values in `fairseq_output` are different from `new_model_outputs`""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
UpperCAmelCase_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 80
| 0
|
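make_linear_from_emb in the BART script above is the standard weight-tying trick: reuse the (vocab, hidden) embedding matrix as a hidden-to-vocab output projection without copying parameters. A cleaned-up sketch with distinct names and the argument order spelled out:
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    # nn.Linear(in_features, out_features): the head maps hidden -> vocab logits.
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # shares the tensor; no copy is made
    return lin_layer

emb = nn.Embedding(100, 16)
head = make_linear_from_emb(emb)
assert head.weight.data_ptr() == emb.weight.data_ptr()  # same underlying storage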