| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
from copy import deepcopy


class BinaryIndexedTree:
    """Binary Indexed Tree (Fenwick tree) supporting point updates and range-sum queries."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
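A short usage sketch of the class above (the values are illustrative; `prefix(right)` sums `arr[0:right]` and `query(left, right)` sums `arr[left:right]`):

bit = BinaryIndexedTree([1, 2, 3, 4, 5])
bit.add(2, 10)            # arr is now [1, 2, 13, 4, 5]
print(bit.prefix(3))      # 16
print(bit.query(1, 4))    # 19
print(bit.get(2))         # 13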
| code_codestyle: 262 |
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    """`diffusers-cli env` subcommand: prints environment info for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| style_context_codestyle: 262 | label: 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __UpperCAmelCase ( __lowerCamelCase ) -> Union[str, Any]:
lowercase__ : Any = image.size
lowercase__ : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowercase__ : int = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
lowercase__ : Dict = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0
lowercase__ : Tuple = image[None].transpose(0 , 3 , 1 , 2 )
lowercase__ : int = torch.from_numpy(_UpperCAmelCase )
return 2.0 * image - 1.0
class __A ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : VQModel ,_snake_case : UNetaDModel ,_snake_case : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] ,) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=__a ,unet=__a ,scheduler=__a )
@torch.no_grad()
def __call__( self : Tuple ,_snake_case : Union[torch.Tensor, PIL.Image.Image] = None ,_snake_case : Optional[int] = 1 ,_snake_case : Optional[int] = 100 ,_snake_case : Optional[float] = 0.0 ,_snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_snake_case : Optional[str] = "pil" ,_snake_case : bool = True ,) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(__a ,PIL.Image.Image ):
lowercase__ : Union[str, Any] = 1
elif isinstance(__a ,torch.Tensor ):
lowercase__ : str = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__a )}""" )
if isinstance(__a ,PIL.Image.Image ):
lowercase__ : Tuple = preprocess(__a )
lowercase__ : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ : Dict = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ : Optional[int] = next(self.unet.parameters() ).dtype
lowercase__ : int = randn_tensor(__a ,generator=__a ,device=self.device ,dtype=__a )
lowercase__ : Union[str, Any] = image.to(device=self.device ,dtype=__a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__a ,device=self.device )
lowercase__ : List[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ : str = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ : str = {}
if accepts_eta:
lowercase__ : Optional[Any] = eta
for t in self.progress_bar(__a ):
# concat latents and low resolution image in the channel dimension.
lowercase__ : int = torch.cat([latents, image] ,dim=1 )
lowercase__ : Optional[int] = self.scheduler.scale_model_input(__a ,__a )
# predict the noise residual
lowercase__ : List[str] = self.unet(__a ,__a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : List[Any] = self.scheduler.step(__a ,__a ,__a ,**__a ).prev_sample
# decode the image latents with the VQVAE
lowercase__ : Any = self.vqvae.decode(__a ).sample
lowercase__ : List[Any] = torch.clamp(__a ,-1.0 ,1.0 )
lowercase__ : Union[str, Any] = image / 2 + 0.5
lowercase__ : str = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
lowercase__ : str = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
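A minimal usage sketch of this pipeline (the checkpoint name is the public LDM 4x super-resolution release; the input path and the 128x128 size are illustrative):

from PIL import Image
from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))  # any small RGB image
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")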
| code_codestyle: 350 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCAmelCase : Optional[str] = field(
default="NER" ,metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCAmelCase : bool = field(default=A_ ,metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class __A :
'''simple docstring'''
lowerCAmelCase : str = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
lowerCAmelCase : Optional[str] = field(
default=A_ ,metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
lowerCAmelCase : int = field(
default=1_2_8 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
lowerCAmelCase : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
def __UpperCAmelCase ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
lowercase__ : str = import_module('''tasks''' )
try:
lowercase__ : List[str] = getattr(__lowerCamelCase , model_args.task_type )
lowercase__ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , __lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowercase__ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowercase__ : Dict[int, str] = dict(enumerate(__lowerCamelCase ) )
lowercase__ : Optional[int] = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid={label: i for i, label in enumerate(__lowerCamelCase )} , cache_dir=model_args.cache_dir , )
lowercase__ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ : str = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ : str = (
TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__lowerCamelCase , __lowerCamelCase ) -> Tuple[List[int], List[int]]:
lowercase__ : Tuple = np.argmax(__lowerCamelCase , axis=2 )
lowercase__ , lowercase__ : Tuple = preds.shape
lowercase__ : List[str] = [[] for _ in range(__lowerCamelCase )]
lowercase__ : Tuple = [[] for _ in range(__lowerCamelCase )]
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__lowerCamelCase ) -> Dict:
lowercase__ , lowercase__ : List[Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__lowerCamelCase , __lowerCamelCase ),
"precision": precision_score(__lowerCamelCase , __lowerCamelCase ),
"recall": recall_score(__lowerCamelCase , __lowerCamelCase ),
"f1": fa_score(__lowerCamelCase , __lowerCamelCase ),
}
# Data collator
lowercase__ : Tuple = DataCollatorWithPadding(__lowerCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ : str = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ : int = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__ : Optional[int] = trainer.evaluate()
lowercase__ : Union[str, Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
results.update(__lowerCamelCase )
# Predict
if training_args.do_predict:
lowercase__ : Optional[int] = TokenClassificationDataset(
token_classification_task=__lowerCamelCase , data_dir=data_args.data_dir , tokenizer=__lowerCamelCase , labels=__lowerCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = trainer.predict(__lowerCamelCase )
lowercase__ , lowercase__ : Tuple = align_predictions(__lowerCamelCase , __lowerCamelCase )
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , __lowerCamelCase , __lowerCamelCase )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowercase__ : Dict = os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return results
def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
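To make the label-alignment step concrete, here is a tiny self-contained sketch of the same masking logic; the shapes and the two-label scheme are made up for illustration:

import numpy as np
from torch import nn

label_map = {0: "O", 1: "B-PER"}
predictions = np.array([[[0.9, 0.1], [0.2, 0.8], [0.5, 0.5]]])  # (batch=1, seq=3, labels=2)
label_ids = np.array([[0, 1, nn.CrossEntropyLoss().ignore_index]])  # last position is a padded sub-token
preds = np.argmax(predictions, axis=2)
aligned = [label_map[p] for p, l in zip(preds[0], label_ids[0]) if l != nn.CrossEntropyLoss().ignore_index]
print(aligned)  # ['O', 'B-PER']: padded positions are dropped before computing seqeval metrics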
| style_context_codestyle: 302 | label: 0 |
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        self.tokenizer_integration_test_util(
            expected_encoding=EXPECTED_ENCODING, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )


# fmt: off
EXPECTED_ENCODING = \
lowerCamelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
| code_codestyle: 19 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class lowerCamelCase (nn.Module ):
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : int = 1
lowerCamelCase__ : int = 1
lowerCamelCase__ : bool = True
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE__ = self.in_channels if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD(
in_channels=__UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnets
SCREAMING_SNAKE_CASE__ = attentions
if self.add_downsample:
SCREAMING_SNAKE_CASE__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=True ) -> Any:
SCREAMING_SNAKE_CASE__ = ()
for resnet, attn in zip(self.resnets , self.attentions ):
SCREAMING_SNAKE_CASE__ = resnet(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = attn(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE__ = self.downsamplers_a(__UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase (nn.Module ):
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : int = 1
lowerCamelCase__ : bool = True
lowerCamelCase__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE__ = self.in_channels if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD(
in_channels=__UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnets
if self.add_downsample:
SCREAMING_SNAKE_CASE__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any]=True ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = ()
for resnet in self.resnets:
SCREAMING_SNAKE_CASE__ = resnet(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
SCREAMING_SNAKE_CASE__ = self.downsamplers_a(__UpperCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class lowerCamelCase (nn.Module ):
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : int = 1
lowerCamelCase__ : int = 1
lowerCamelCase__ : bool = True
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE__ = self.prev_output_channel if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnets
SCREAMING_SNAKE_CASE__ = attentions
if self.add_upsample:
SCREAMING_SNAKE_CASE__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any=True ) -> Union[str, Any]:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
SCREAMING_SNAKE_CASE__ = resnet(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = attn(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
if self.add_upsample:
SCREAMING_SNAKE_CASE__ = self.upsamplers_a(__UpperCAmelCase )
return hidden_states
class lowerCamelCase (nn.Module ):
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : int = 1
lowerCamelCase__ : bool = True
lowerCamelCase__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
for i in range(self.num_layers ):
SCREAMING_SNAKE_CASE__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE__ = self.prev_output_channel if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnets
if self.add_upsample:
SCREAMING_SNAKE_CASE__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str]=True ) -> Dict:
for resnet in self.resnets:
# pop res hidden states
SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE__ = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
SCREAMING_SNAKE_CASE__ = resnet(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
if self.add_upsample:
SCREAMING_SNAKE_CASE__ = self.upsamplers_a(__UpperCAmelCase )
return hidden_states
class lowerCamelCase (nn.Module ):
lowerCamelCase__ : int
lowerCamelCase__ : float = 0.0
lowerCamelCase__ : int = 1
lowerCamelCase__ : int = 1
lowerCamelCase__ : bool = False
lowerCamelCase__ : bool = False
lowerCamelCase__ : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
# there is always at least one resnet
SCREAMING_SNAKE_CASE__ = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
SCREAMING_SNAKE_CASE__ = []
for _ in range(self.num_layers ):
SCREAMING_SNAKE_CASE__ = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnets
SCREAMING_SNAKE_CASE__ = attentions
def __call__( self : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : List[str]=True ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.resnets[0](__UpperCAmelCase , __UpperCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
SCREAMING_SNAKE_CASE__ = attn(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = resnet(__UpperCAmelCase , __UpperCAmelCase , deterministic=__UpperCAmelCase )
return hidden_states
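A quick shape sketch for the plain down block (the Flax blocks expect channel-last NHWC layout; the sizes here are arbitrary, and the output comment assumes the default stride-2 downsampler):

import jax
import jax.numpy as jnp

block = FlaxDownBlock2D(in_channels=32, out_channels=64)
hidden = jnp.zeros((1, 16, 16, 32))  # NHWC
temb = jnp.zeros((1, 128))           # time embedding
params = block.init(jax.random.PRNGKey(0), hidden, temb)
out, skips = block.apply(params, hidden, temb)  # out: (1, 8, 8, 64) after downsampling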
| style_context_codestyle: 165 | label: 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
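The module above registers import targets but defers the heavy imports until first attribute access. A minimal sketch of the same idea, independent of the richer `_LazyModule` implementation that ships with transformers/diffusers:

import importlib
import sys
import types

class LazyModule(types.ModuleType):
    """Load submodules on first attribute access and cache the result."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value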
| code_codestyle: 236 |
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
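For context, the processor under test is used like this outside the test suite (`apple/mobilevit-small` is a published MobileViT checkpoint; the output spatial size follows that checkpoint's `crop_size`):

from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
pixel_values = processor(Image.open("example.jpg"), return_tensors="pt").pixel_values  # (1, 3, crop_h, crop_w)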
| style_context_codestyle: 236 | label: 1 |
import argparse
import os

import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"


# NOTE: the assignment targets in the three loaders below were elided in the source dump;
# they are reconstructed here assuming the T5-style layout of the diffusers encoder/decoder modules.
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)

    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]

        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model


def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False)
    model.continuous_inputs_projection.weight = nn.Parameter(torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model


def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()

    main(args)
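A plausible invocation of this conversion script (the file name matches its location in the diffusers repository; paths are placeholders):

# python scripts/convert_music_spectrogram_to_diffusers.py --checkpoint_path base_with_context/checkpoint_500000 --output_path ./spectrogram_diffusion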
| code_codestyle: 237 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
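Both helpers come from `diffusers.utils`: `DummyObject` is a metaclass that routes class-level attribute access to `requires_backends`, which raises an informative ImportError when the optional backend is missing. A simplified standalone sketch of the mechanism, not the library's exact code:

class DummyObject(type):
    # Any attribute access on a class using this metaclass surfaces the missing backend.
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the note_seq library, which is not installed.")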
| style_context_codestyle: 237 | label: 1 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
__snake_case = False
try:
__snake_case = _is_package_available('''google.colab''')
except ModuleNotFoundError:
pass
@input.register
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase = None , __UpperCAmelCase = [] ) -> Any:
_a = 0
_a = choices
_a = prompt
if sys.platform == "win32":
_a = '''*'''
else:
_a = '''➔ '''
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "" ) -> List[str]:
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , __UpperCAmelCase )
else:
forceWrite(self.choices[index] , __UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]:
if index == self.position:
forceWrite(F' {self.arrow_char} ' )
self.write_choice(__UpperCAmelCase )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = 1 ) -> Optional[int]:
_a = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__UpperCAmelCase )
move_cursor(__UpperCAmelCase , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def _UpperCAmelCase ( self ) -> int:
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def _UpperCAmelCase ( self ) -> str:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def _UpperCAmelCase ( self ) -> List[str]:
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def _UpperCAmelCase ( self ) -> Tuple:
move_cursor(len(self.choices ) - self.position , '''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__UpperCAmelCase )] for number in range(10 )] )
def _UpperCAmelCase ( self ) -> str:
_a = int(chr(self.current_selection ) )
_a = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , __UpperCAmelCase )
else:
return
else:
return
def _UpperCAmelCase ( self , __UpperCAmelCase = 0 ) -> Any:
if self.prompt:
linebreak()
forceWrite(self.prompt , '''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' )
_a = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__UpperCAmelCase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position , '''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
_a = int(builtins.input() )
except ValueError:
_a = default_choice
else:
_a = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , '''UP''' )
clear_line()
self.write_choice(__UpperCAmelCase , '''\n''' )
return choice
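A minimal interactive use of the menu (assumes a real terminal; under Colab it falls back to typed indices):

menu = BulletMenu("Which framework do you want to configure?", ["pytorch", "tensorflow", "flax"])
selected_index = menu.run(default_choice=0)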
| code_codestyle: 153 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
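# Note: `get_duration`, imported from `utils` above, is not shown in this file. A minimal
# sketch of such a timing decorator (an assumption, not the benchmark suite's actual
# helper) could look like:
#
#     import functools
#     import timeit
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = timeit.default_timer()
#             func(*args, **kwargs)
#             return timeit.default_timer() - start  # elapsed seconds
#         return wrapper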
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content: removes duplicate entries and sorts models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                modality_doc["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
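# Example (illustrative): a modality section with a duplicated `local` key is collapsed
# to a single entry and sorted by title:
#
#     clean_model_doc_toc([
#         {"local": "model_doc/bert", "title": "BERT"},
#         {"local": "model_doc/albert", "title": "ALBERT"},
#         {"local": "model_doc/bert", "title": "BERT"},
#     ])
#     # -> [{"local": "model_doc/albert", "title": "ALBERT"},
#     #     {"local": "model_doc/bert", "title": "BERT"}]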
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
"GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXForCausalLM",
"GPTNeoXForQuestionAnswering",
"GPTNeoXForSequenceClassification",
"GPTNeoXForTokenClassification",
"GPTNeoXLayer",
"GPTNeoXModel",
"GPTNeoXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
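# Example (illustrative): instantiating the configuration with beit-base style defaults
# or a single override, then building a randomly initialised model from it.
#
#     from transformers import BeitConfig, BeitModel
#     config = BeitConfig(image_size=384)  # override one attribute
#     model = BeitModel(config)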
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root exists in [a, b] only if the function changes sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowerCAmelCase__ ( _UpperCamelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
"""simple docstring"""
snake_case = BeautifulSoup(requests.get(_UpperCamelCase ).text , 'html.parser' )
snake_case = soup.findAll('h1' )
snake_case = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_UpperCamelCase , _UpperCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ):
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
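# Example (illustrative sketch): a denoising loop driven by this scheduler. The model
# call is a zero-tensor placeholder; a real pipeline would use a trained UNet here.
#
#     import torch
#     from diffusers import IPNDMScheduler
#
#     scheduler = IPNDMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = torch.randn(1, 3, 32, 32)
#     for t in scheduler.timesteps:
#         model_output = torch.zeros_like(sample)  # placeholder for a UNet prediction
#         sample = scheduler.step(model_output, t, sample).prev_sample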
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
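# Example (illustrative): loading the tokenizer for the checkpoint referenced in the
# pretrained maps above, then round-tripping a sentence.
#
#     from transformers import Speech2TextTokenizer
#     tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tokenizer("hello world").input_ids  # ends with tokenizer.eos_token_id
#     text = tokenizer.decode(ids, skip_special_tokens=True)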
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
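    # Cross-check (illustrative): on a shared universe these fuzzy union/intersection
    # operations reduce to element-wise max/min, so the skfuzzy results can be verified
    # with plain NumPy:
    #
    #     assert np.allclose(union, np.maximum(young, middle_aged))
    #     assert np.allclose(intersection, np.minimum(young, middle_aged))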
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
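# Example (illustrative sketch): extracting log-mel features from one second of random
# mono audio at the extractor's default 44.1 kHz sampling rate.
#
#     import numpy as np
#     extractor = TvltFeatureExtractor()
#     waveform = np.random.randn(44100).astype(np.float32)
#     features = extractor(waveform, sampling_rate=44100, return_tensors="np")
#     print(features["audio_values"].shape, features["audio_mask"].shape)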
"""simple docstring"""
from __future__ import annotations
import math
# for calculating u value
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
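# Worked example (illustrative): for x = [0, 1, 2] with y-values [1, 2, 4], the forward
# differences are dy0 = 1 and d2y0 = 1. Interpolating at value = 2 gives u = 2, so
# summ = 1 + ucal(2, 1) * 1 / 1! + ucal(2, 2) * 1 / 2! = 1 + 2 + 1 = 4, matching the
# known data point.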
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the euclidean distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def lowercase ( lowerCAmelCase__ : Tuple ) -> str:
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(lowerCAmelCase__ , '''_dynamo''' ):
return False
return isinstance(lowerCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP/DataParallel/DeepSpeed/compile wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to disk on the main process only (XLA-aware). Use in place of `torch.save()`."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily add each keyword argument (upper-cased) to `os.environ`, removing it on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj) -> str:
    """Get a human-readable name for `obj` (its qualname, name, or string form)."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge nested dictionary `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port: int = None) -> bool:
    """Check whether `port` (default 29500, the usual distributed port) is already in use on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
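# A minimal usage sketch for two of the helpers above (the env-var names are
# hypothetical examples, not values the library requires):
#
#     with patch_environment(master_addr="127.0.0.1", master_port=29501):
#         ...  # os.environ["MASTER_ADDR"] and os.environ["MASTER_PORT"] exist here
#     # both variables are removed again once the block exits
#
#     merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}})  # -> {"a": {"c": 2, "b": 1}}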
| 45
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
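# Note on the pattern above: `_import_structure` maps submodule names to exported
# symbols, and `_LazyModule` defers the heavy framework-specific imports until one of
# those symbols is actually accessed (a sketch of the mechanism, not additional API).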
| 45
| 1
|
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
| 356
|
'''simple docstring'''
import os
def a__ ( a__ = "input.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file:
__SCREAMING_SNAKE_CASE = [
[int(a__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
__SCREAMING_SNAKE_CASE = len(a__ )
__SCREAMING_SNAKE_CASE = len(matrix[0] )
__SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )]
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = matrix[i][0]
for j in range(1 , a__ ):
for i in range(a__ ):
__SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , a__ ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__SCREAMING_SNAKE_CASE = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 331
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
    def __call__(self, images=None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, max_patches: Optional[int] = 2048, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
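# A minimal usage sketch (the checkpoint name is illustrative; any Pix2Struct
# checkpoint with a matching image processor and T5 tokenizer should work):
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#     inputs = processor(images=image, text="A caption", return_tensors="pt")
#     # image inputs come back as flattened patches; the text becomes
#     # decoder_input_ids / decoder_attention_mask for conditional generation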
| 149
|
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list[int], queue: deque[Process], current_time: int) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one apply round robin with their own time slice
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
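# Hand trace of the run above (time slices 17 and 25, every process arriving at t=0):
#   RR pass 1 (slice 17): P1 -> t=17, P2 finishes at t=34, P3 -> t=51, P4 -> t=68
#   RR pass 2 (slice 25): P1 -> t=93 (11 left), P3 -> t=118 (26 left), P4 finishes at t=125
#   FCFS tail:            P1 finishes at t=136, P3 finishes at t=162
# so the expected finish sequence is ['P2', 'P4', 'P1', 'P3'].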
| 149
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a :List[Any] = logging.get_logger(__name__)
a :str = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"
    def __init__(self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse", use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3, classifier_dropout=None, **kwargs):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
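# A minimal sketch of using the config (all values shown are the defaults above):
#
#     config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#     config.model_type  # -> "big_bird"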
| 56
|
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 56
| 1
|
'''simple docstring'''
from __future__ import annotations
a : str = """#"""
class UpperCamelCase_ :
def __init__( self ) -> None:
UpperCAmelCase : dict = {}
def _lowercase( self , A ) -> None:
UpperCAmelCase : Any = self._trie
for char in text:
if char not in trie:
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : Optional[Any] = trie[char]
UpperCAmelCase : List[str] = True
def _lowercase( self , A ) -> tuple | list:
UpperCAmelCase : List[Any] = self._trie
for char in prefix:
if char in trie:
UpperCAmelCase : Union[str, Any] = trie[char]
else:
return []
return self._elements(A )
def _lowercase( self , A ) -> tuple:
UpperCAmelCase : str = []
for c, v in d.items():
UpperCAmelCase : List[str] = [""" """] if c == END else [(c + s) for s in self._elements(A )]
result.extend(A )
return tuple(A )
a : Any = Trie()
a : int = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def __lowerCamelCase ( _lowercase ) -> tuple:
UpperCAmelCase : Union[str, Any] = trie.find_word(_lowercase )
return tuple(string + word for word in suffixes )
def __lowerCamelCase ( ) -> None:
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
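# Expected behaviour, hand-derived from the word list above: completed words carry a
# trailing space marker, so autocomplete_using_trie("de") returns
# ('depart ', 'detergent ', 'deer ', 'deal ') -- 'daring' and 'dog' do not share the
# 'de' prefix.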
| 265
|
'''simple docstring'''
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a : Any = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 265
| 1
|
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
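# Sanity check (hand-computable): solution(15) sums the digits of 2**15 = 32768,
# i.e. 3 + 2 + 7 + 6 + 8 = 26.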
| 125
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Return the Möbius function value of n: 1, -1, or 0."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
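# Hand-checkable values: mobius(24) == 0 (24 = 2^3 * 3 is not square-free),
# mobius(15) == 1 (an even number of prime factors: 3 * 5), and
# mobius(30) == -1 (an odd number: 2 * 3 * 5).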
| 125
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 284
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
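# Hand trace for the scores above (height = log2(8) = 3, maximizer moves first):
#   depth 2 (max): 90, 33, 65, 34423  ->  depth 1 (min): 33, 65  ->  depth 0 (max): 65
# so main() prints "Optimal value : 65".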
| 284
| 1
|
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self
def __repr__( self ) -> str:
return pformat(self.adj_list )
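# A minimal usage sketch (vertex labels are illustrative):
#
#     d_graph = GraphAdjacencyList()           # directed by default
#     d_graph.add_edge(0, 1).add_edge(1, 2)    # add_edge returns self, so calls chain
#     print(d_graph)                           # -> {0: [1], 1: [2], 2: []}
#
#     u_graph = GraphAdjacencyList(directed=False)
#     u_graph.add_edge(0, 1)
#     print(u_graph)                           # -> {0: [1], 1: [0]}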
| 360
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
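# Behaviour sketch for TranslationVariableLanguages (hand-derived from encode_example
# above): multiple translations for one language are flattened into parallel tuples,
# sorted by language code:
#
#     feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
#     # -> {"language": ("de", "en", "fr", "fr"),
#     #     "translation": ("die katze", "the cat", "la chatte", "le chat")}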
| 243
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 331
| 0
|
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 267
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond: n rows of left-padded stars."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    """Print the lower half of the diamond: rows of stars shrinking back to one."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a friendly message for non-positive n."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("""Good Bye...""")
| 267
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 56
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
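# The call above applies the secant iteration to f(x) = x^3 - 2x - 5, whose real
# root is approximately 2.0945514815; the loop stops once successive iterates differ
# by less than 1e-5.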
| 56
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt until the user gives a convertible answer (or accept the default on empty input)."""
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    """Present a bullet menu of `options` and return the (optionally converted) choice."""
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 153
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"""image.shape {image.shape}""")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 125
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class __a (PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
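# Hedged illustration of the [CLS]/[SEP] layout the two methods above produce,
# using plain lists instead of a real tokenizer; the token ids are made up.
def _build_pair_demo(ids_a, ids_b=None, cls_id=101, sep_id=102):
    out = [cls_id] + ids_a + [sep_id]
    type_ids = [0] * len(out)
    if ids_b:
        out += ids_b + [sep_id]
        type_ids += [1] * (len(ids_b) + 1)
    return out, type_ids

if __name__ == "__main__":
    # single sequence: every token_type_id is 0
    print(_build_pair_demo([7, 8, 9]))
    # sequence pair: the second segment (and its trailing [SEP]) gets 1
    print(_build_pair_demo([7, 8], [5, 6]))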
| 125
| 1
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('''int() can\'t convert non-string with explicit base''' )
    if num < 0:
        raise ValueError('''parameter must be positive int''' )
    if isinstance(base, str):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if isinstance(base, float):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if base in (0, 1):
        raise ValueError('''base must be >= 2''' )
    if base > 36:
        raise ValueError('''base must be <= 36''' )
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1] )
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1] )
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
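# A few hand-checked conversions for the function above; int(value, base) is
# the stdlib inverse already used by the exhaustive self-test loop.
if __name__ == "__main__":
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(255, 2) == "11111111"
    assert decimal_to_any(35, 36) == "Z"
    assert int(decimal_to_any(12_345, 7), 7) == 12_345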
| 39
|
speed_chart: dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_93_44,
"knot": 1.8_52,
}
speed_chart_inverse: dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_77_77_77_78,
"mph": 0.6_21_37_11_92,
"knot": 0.5_39_95_68_03,
}
def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
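# Quick sanity checks for the converter above; expected values follow directly
# from the chart entries (e.g. 1 mph = 1.609344 km/h), rounded to 3 places.
if __name__ == "__main__":
    assert convert_speed(100, "km/h", "m/s") == 27.778
    assert convert_speed(100, "km/h", "mph") == 62.137
    assert convert_speed(1, "mph", "km/h") == 1.609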
| 39
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __SCREAMING_SNAKE_CASE (PipelineTool ):
    """simple docstring"""
    default_checkpoint ="""Salesforce/blip-image-captioning-base"""
    description =(
        """This is a tool that generates a description of an image. It takes an input named `image` which should be the """
        """image to caption, and returns a text that contains the description in English."""
    )
    name ="""image_captioner"""
    model_class =AutoModelForVision2Seq
    inputs =["""image"""]
    outputs =["""text"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["vision"] )
        super().__init__(*args , **kwargs )
    def encode( self , image ):
        return self.pre_processor(images=image , return_tensors="pt" )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
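# Hedged usage sketch for the captioning tool above: calling the tool follows
# the PipelineTool convention (encode -> forward -> decode), the exact Tool
# invocation API can vary across transformers versions, and "example.jpg" is
# a placeholder path. Instantiation downloads the BLIP checkpoint on first use.
if __name__ == "__main__":
    from PIL import Image
    captioner = __SCREAMING_SNAKE_CASE()
    print(captioner(Image.open("example.jpg")))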
| 63
|
"""simple docstring"""
import base64
def base85_encode(string: str) -> bytes:
    """simple docstring"""
    return base64.b85encode(string.encode("utf-8" ) )
def base85_decode(a85encoded: bytes) -> str:
    """simple docstring"""
    return base64.b85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base85_encode(test)
    print(encoded)
    decoded = base85_decode(encoded)
    print(decoded)
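# Round-trip property check for the helpers above: decode(encode(s)) == s for
# any unicode string, since Base85 is a lossless binary-to-text encoding.
def _roundtrip(s: str) -> bool:
    return base85_decode(base85_encode(s)) == s

if __name__ == "__main__":
    assert _roundtrip("")
    assert _roundtrip("Hello World!")
    assert _roundtrip("ünïcödé is fine too")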
| 243
| 0
|
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
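# The complex value returned above carries real power in the real part and
# reactive power in the imaginary part; a quick worked example (note that this
# implementation sums the two angles rather than computing V * conj(I)).
if __name__ == "__main__":
    s = apparent_power(100, 5, 30, -30)  # angles cancel -> purely real
    print(f"P = {s.real:.2f} W, Q = {s.imag:.2f} VAR")  # P = 500.00, Q = 0.00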
| 256
|
def depth_first_search(grid, row, col, visit):
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
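# Tiny worked example for the path counter above: a 3x3 grid with the centre
# blocked (1 = wall). Paths run from the top-left to the bottom-right corner.
if __name__ == "__main__":
    maze = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    # prints 2: clockwise and anticlockwise around the blocked centre
    print(depth_first_search(maze, 0, 0, set()))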
| 256
| 1
|
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    """simple docstring"""
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path):
    """simple docstring"""
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / F'{script_name}.py'
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_dir )
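# Hedged sketch of a consumer for the fixtures above: pytest injects
# dataset_loading_script_dir by name. Calling datasets.load_dataset on that
# path would download the jsonl files referenced in the script code, so this
# test is illustrative rather than something the suite necessarily contains.
def test_dataset_loading_script_dir_exists(dataset_loading_script_dir):
    import os
    assert os.path.isdir(dataset_loading_script_dir)
    assert os.path.isfile(os.path.join(dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py"))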
| 267
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
UpperCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 267
| 1
|
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])
IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
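# A small runnable check of the relationship these constants encode: each
# *_BATCH_PARAMS set is, by construction, a subset of the corresponding full
# call-parameter set, which is what the pipeline test mixins rely on when they
# batch inputs. (Mixin class attribute names are hedged; they vary per test.)
if __name__ == "__main__":
    assert TEXT_TO_IMAGE_BATCH_PARAMS <= TEXT_TO_IMAGE_PARAMS
    assert IMAGE_INPAINTING_BATCH_PARAMS <= TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    print(sorted(TEXT_TO_IMAGE_PARAMS - TEXT_TO_IMAGE_BATCH_PARAMS))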
| 326
|
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    '''simple docstring'''
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'r' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path , 'tokenizer_config.json' ) , 'w' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'] )[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(['#'] )[0]
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop('entity_predictions.decoder.weight' )
    state_dict.pop('lm_head.decoder.weight' )
    state_dict.pop('lm_head.decoder.bias' )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
            state_dict_for_hugging_face['luke.' + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(state_dict_for_hugging_face , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )
    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='pt' )
    outputs = model(**encoding )
    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab(entity_vocab_path ):
    '''simple docstring'''
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f'{language}:{entity_name}'] = entity_id
    return new_mapping
if __name__ == "__main__":
__A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
__A : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
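# Example invocation (hedged: the script filename and the input paths are
# placeholders for files shipped with the original mLUKE release):
#
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base-converted \
#       --model_size base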
| 326
| 1
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )
    def create_estimator( self , instance_count=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
    def test_glue( self ):
        # create estimator
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _UpperCamelCase )
| 8
|
"""simple docstring"""
class _lowerCamelCase :
    def __init__(self , array ) -> None:
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self , start , end ) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self , target_sum ) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
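# Worked example for the class above: prefix sums give O(1) range-sum queries
# after an O(n) build, and contains_sum finds a contiguous subarray summing to
# the target via the classic "seen prefix" trick.
if __name__ == "__main__":
    ps = _lowerCamelCase([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
    assert ps.get_sum(0, 3) == 10        # whole array
    assert ps.get_sum(1, 2) == 5         # 2 + 3
    assert ps.contains_sum(7)            # subarray [3, 4]
    assert not ps.contains_sum(100)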
| 153
| 0
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint )
| 176
|
def or_gate(input_1 , input_2 ) -> int:
    return int((input_1, input_2).count(1 ) != 0 )
def test_or_gate() -> None:
    assert or_gate(0 ,0 ) == 0
    assert or_gate(0 ,1 ) == 1
    assert or_gate(1 ,0 ) == 1
    assert or_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 176
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        """simple docstring"""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_feature_extractor( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 39
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class __lowerCamelCase ( ProcessorMixin ):
    """simple docstring"""
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__( self , tokenizer , speaker_embeddings=None ):
        """simple docstring"""
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        """simple docstring"""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"""`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
    def save_pretrained( self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub = False , **kwargs , ):
        """simple docstring"""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , 'v2' ) , exist_ok=True )
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key )
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'] , speaker_embeddings_directory , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=False , )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory , f"""{prompt_key}_{key}.npy""" )
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , 'w' ) as fp:
                json.dump(embeddings_dict , fp )
        super().save_pretrained(save_directory , push_to_hub , **kwargs )
    def _load_voice_preset( self , voice_preset = None , **kwargs ):
        """simple docstring"""
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if path is None:
                raise ValueError(
                    f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.""" )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset = None ):
        """simple docstring"""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        """simple docstring"""
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding='max_length' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
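# Hedged usage sketch for the processor above (the class name is the mangled
# one defined in this file; upstream it is BarkProcessor). Running this
# downloads the "suno/bark-small" tokenizer and its speaker embeddings.
if __name__ == "__main__":
    processor = __lowerCamelCase.from_pretrained("suno/bark-small")
    inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
    print(sorted(inputs.keys()))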
| 39
| 1
|
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_gradient_accumulator( self ):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def test_gradient_accumulator_distribution_strategy( self ):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU' )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type='CPU' )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1E-2 )
        accumulate([1.0, 2.0] , [-1.0, 1.0] )
        accumulate([3.0, -1.0] , [-1.0, -1.0] )
        accumulate([-2.0, 2.0] , [3.0, -2.0] )
        self.assertEqual(accumulator.step , 3 )
        _check_local_values([2.0, 3.0] , [1.0, -2.0] )
        apply_grad()
        self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
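# A hedged sketch of the pattern the tests above exercise in training code:
# accumulate per-step gradients and apply them once every N steps. The loop
# uses toy constants instead of gradients from a real model.
if __name__ == "__main__" and is_tf_available():
    accumulator = GradientAccumulator()
    accumulation_steps = 4
    for step in range(8):
        accumulator([tf.constant([1.0, -1.0])])  # stands in for tape.gradient(...)
        if (step + 1) % accumulation_steps == 0:
            # optimizer.apply_gradients(zip(accumulator.gradients, variables)) would go here
            print("apply at step", step + 1, accumulator.gradients[0].numpy())
            accumulator.reset()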
| 181
|
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
snake_case_ = 3
def primitive_root(p_val ):
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size ):
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name , key_size ):
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print('\nWARNING:' )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , 'w' ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , 'w' ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )
def main():
    print('Making key files...' )
    make_key_files('elgamal' , 2048 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
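# For context, a textbook-ElGamal round trip (hedged: this standalone sketch
# uses h = g^d mod p directly, which differs slightly from the inverted e_2
# component stored by generate_key above; it is here to illustrate the math).
def _textbook_elgamal_demo():
    p, g, d = 467, 2, 153          # toy prime, generator, private key
    h = pow(g, d, p)               # public key component
    m, k = 331, 197                # message and ephemeral key
    c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p
    s = pow(c1, d, p)              # shared secret g^(d*k) mod p
    recovered = (c2 * pow(s, -1, p)) % p
    assert recovered == m

if __name__ == "__main__":
    _textbook_elgamal_demo()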
| 181
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
        assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
        assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
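# A compact, hedged sketch of the ancestral-sampling loop the full-loop tests
# above exercise, using random tensors in place of a trained UNet.
if __name__ == "__main__":
    sched = DDPMScheduler(num_train_timesteps=1000)
    sched.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    generator = torch.manual_seed(0)
    for t in sched.timesteps:
        noise_pred = torch.randn_like(sample)  # stands in for model(sample, t)
        sample = sched.step(noise_pred, t, sample, generator=generator).prev_sample
    print(sample.shape)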
| 256
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob ,
    find_max : bool = True ,
    max_x : float = math.inf ,
    min_x : float = -math.inf ,
    max_y : float = math.inf ,
    min_y : float = -math.inf ,
    visualization : bool = False ,
    start_temperate : float = 100 ,
    rate_of_decrease : float = 0.01 ,
    threshold_temp : float = 1 ,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x , y ):
        return (x**2) + (y**2)
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
        F'''and 50 > y > - 5 found via simulated annealing: {local_max.score()}'''
    )
    def test_f2(x , y ):
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
        F'''{local_max.score()}'''
    )
| 256
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 288
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
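
# A quick sanity check of the invariant these pairs maintain: every *_BATCH_PARAMS
# set is a subset of the corresponding full parameter set (spot-checked here for
# three representative pairs).
assert TEXT_TO_IMAGE_BATCH_PARAMS <= TEXT_TO_IMAGE_PARAMS
assert TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS <= TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
assert IMAGE_INPAINTING_BATCH_PARAMS <= IMAGE_INPAINTING_PARAMS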
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
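
# For context, trainers call default_hp_search_backend() when the user does not
# pass an explicit backend; it also works as a standalone capability probe.
# A minimal sketch using only the function defined above:
def _print_default_backend():
    try:
        print(f"Using hyperparameter search backend: {default_hp_search_backend()}")
    except RuntimeError as err:  # none of optuna/ray/sigopt/wandb is installed
        print(err)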
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(int(digit) for digit in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
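
# Quick sanity checks: 10! = 3628800, whose digits sum to 27, and the digit sum
# of 100! is the published Project Euler 20 answer.
assert solution(10) == 27
assert solution(100) == 648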
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
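
# The launcher assumes the target script exposes a module-level `_mp_fn` entry
# point (that is what `mod._mp_fn` above resolves). A minimal compatible script,
# saved as a hypothetical train_dummy.py, could look like:
#
#     import sys
#
#     def _mp_fn(index):
#         # `index` is the ordinal of the TPU core running this copy
#         print(f"process {index} saw argv: {sys.argv[1:]}")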
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
import os
def solution() -> int:
    """Sum the name scores of the alphabetically sorted names in p022_names.txt (Project Euler 22)."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the processor's logic."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Pipeline answering open-ended questions about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input is already in {"image": ..., "question": ...} form
            # (or an iterable of such dicts).
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""Greatest common divisor, computed recursively and iteratively (Euclid's algorithm)."""


def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
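
# Hand-checkable examples for both implementations:
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8
assert greatest_common_divisor(0, 7) == 7  # gcd with zero is the other operand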
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self, images: ImageInput = None, text=None, add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            # Both the language tokenizer and the Q-Former tokenizer receive the same keyword arguments.
            tokenizer_kwargs = dict(
                add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length,
                verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            text_encoding = self.tokenizer(text=text, **tokenizer_kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, **tokenizer_kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwritten to also save the Q-Former tokenizer, in its own subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwritten to also load the Q-Former tokenizer from its subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
def solution() -> int:
    """Count the Sundays that fell on the first of the month in 1901-2000 (Project Euler 19)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
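
# Regression check against the published Project Euler 19 answer:
assert solution() == 171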
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check whether the net moment of the force system is approximately zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase__ = array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
UpperCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase__ = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
UpperCAmelCase__ = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase__ = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
UpperCAmelCase__ = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
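
# Quick unit-circle check of polar_force (up to floating-point rounding):
from numpy import isclose

fx, fy = polar_force(10, 0)
assert isclose(fx, 10) and isclose(fy, 0, atol=1e-9)  # 0 degrees: all along x
fx, fy = polar_force(10, 90)
assert isclose(fx, 0, atol=1e-9) and isclose(fy, 10)  # 90 degrees: all along y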
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
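# Added, hedged demo (not part of the original test file): what
# `prepare_mbart_inputs_dict` builds for a toy batch. Token ids and config sizes
# below are illustrative only.
def _demo_prepare_mbart_inputs():
    config = MBartConfig(
        vocab_size=99,
        pad_token_id=1,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=4,
        decoder_attention_heads=4,
    )
    input_ids = tf.constant([[5, 6, 7, 2], [8, 9, 2, 1]])  # 1 == pad
    decoder_input_ids = tf.constant([[1, 5, 6, 7], [1, 8, 9, 2]])
    inputs = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
    # attention_mask is 0 exactly where input_ids equals pad_token_id
    return {k: tuple(v.shape) for k, v in inputs.items()}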
| 355
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
A : Any = "bert-base-cased"
A : Any = "google/pegasus-xsum"
A : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
A : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
A : Optional[int] = "patrickvonplaten/t5-tiny-random"
A : int = "sshleifer/bart-tiny-random"
A : Optional[int] = "sshleifer/tiny-mbart"
A : Any = "sshleifer/tiny-marian-en-de"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = "\n".join(_UpperCamelCase )
Path(_UpperCamelCase ).open("w" ).writelines(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.source" ) , _UpperCamelCase )
_dump_articles(os.path.join(_UpperCamelCase , f"{split}.target" ) , _UpperCamelCase )
return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue

            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch
    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
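# Added, hedged illustration (not from the original file) of the idea behind the
# sortish sampler tested above: shuffle indices, then sort by length inside large
# chunks, so each batch has similar lengths and little padding. Toy version only.
import random


def sortish_indices(lengths, batch_size, chunks_per_megachunk=50):
    idx = list(range(len(lengths)))
    random.shuffle(idx)
    size = batch_size * chunks_per_megachunk
    megachunks = [idx[i : i + size] for i in range(0, len(idx), size)]
    return [i for chunk in megachunks for i in sorted(chunk, key=lambda j: lengths[j], reverse=True)]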
| 259
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
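# Added, hedged toy version (not part of the module above) of the _LazyModule idea:
# attribute access triggers the real import, so importing the package stays cheap
# until a symbol is actually used.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")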
| 250
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
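# Added, hedged usage sketch (requires network access to the Hugging Face hub;
# kept commented so the module stays import-safe):
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Un exemple de phrase.")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))  # sentencepiece pieces plus <s>/</s>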
| 81
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
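# Added, hedged standalone demo (not part of the test file): rebuild the tiny
# fixture tokenizer above and show the BPE split that test_full_tokenizer asserts.
def _demo_ctrl_bpe():
    import tempfile

    vocab_tokens = dict(zip(["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"], range(7)))
    merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
    tmp_dir = tempfile.mkdtemp()
    vocab_file = os.path.join(tmp_dir, VOCAB_FILES_NAMES["vocab_file"])
    merges_file = os.path.join(tmp_dir, VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))
    tokenizer = CTRLTokenizer(vocab_file, merges_file, unk_token="<unk>")
    # -> ['adapt', 're@@', 'a@@', 'c@@', 't', 're@@', 'adapt', 'apt']
    return tokenizer.tokenize("adapt react readapt apt")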
| 362
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
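# Added, hedged usage sketch (requires hub access; kept commented so the test
# module stays import-safe):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["cat", "nasa badge"]], images=pil_image, return_tensors="pt")
#   # -> input_ids/attention_mask (one row per text query) plus pixel_values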
| 9
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to
        DeformableDetrImageProcessor, assuming do_resize is set to True.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
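# Added, hedged sketch (not part of the test file): the shortest-edge resize rule
# that get_expected_values() above encodes, ignoring the `longest_edge` cap.
def shortest_edge_resize(w, h, shortest_edge=18):
    """Return (expected_height, expected_width) after resizing the short side."""
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


# e.g. shortest_edge_resize(30, 400) == (240, 18)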
| 14
|
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation-count string scraped from a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
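# Added, hedged offline check (not part of the original script): the same selector
# chain applied to canned markup, so the parsing can be exercised without hitting
# Google Scholar.
def _demo_parse_citation():
    html = (
        '<div class="gs_ri"><div class="gs_fl">'
        "<a>related</a><a>versions</a><a>Cited by 42</a></div></div>"
    )
    div = BeautifulSoup(html, "html.parser").find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()  # -> "Cited by 42"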
| 14
| 1
|
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

""",
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
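# Added, hedged usage sketch (requires hub access; the model name is only an example):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=2, targets=[" capital", " center"])
#   # -> list of {"score", "token", "token_str", "sequence"} dicts, limited to the targets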
| 360
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
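# Added, hedged sketch (not part of the module): what the OnnxConfig above would report.
#   config = RobertaPreLayerNormConfig()
#   onnx_config = RobertaPreLayerNormOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)
#   # -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#   #                 ("attention_mask", {0: "batch", 1: "sequence"})])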
| 286
| 0
|
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """
    Tarjan's algorithm for finding strongly connected components in a directed
    graph, given as an adjacency list. Runs in O(V + E) with one depth-first search.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for n vertices from directed (u, v) edge pairs."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
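    # Added, hedged extra check (not in the original): a 2-cycle plus an isolated
    # vertex; components are emitted in the order the DFS completes them.
    assert [[1, 0], [2]] == tarjan(create_graph(3, [(0, 1), (1, 0)]))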
| 15
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100, candidate_labels=['politics', 'public health', 'science'] )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='pt', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            }, )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            'zero-shot-classification', model='sshleifer/tiny-distilbert-base-cased-distilled-squad', framework='tf', )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.333, 0.333, 0.333],
            }, )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='pt')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('zero-shot-classification', model='roberta-large-mnli', framework='tf')
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?', candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs), {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
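

# Note on `entailment_id`, exercised by `run_entailment_id` above: as I recall the
# pipeline scans `model.config.label2id` for a label whose lowercased name starts
# with "entail" and returns its index, falling back to -1 (the last logit) when
# none is found -- which is why {'LABEL_0': 0, ...} yields -1 while
# {'ENTAIL': 2, ...} yields 2.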
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type='np', )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction')
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type='np', ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy' )

        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type='np', )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy' )

        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type='np', )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type='np', )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
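

# Design note: `enable_sequential_cpu_offload` keeps only the submodule that is
# currently executing on the GPU, trading latency for the low peak memory asserted
# above; combined with `enable_attention_slicing(1)` it keeps the fp16 x4 upscaler
# under roughly 2.9 GB of VRAM.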
import base64


def base85_encode(string: str) -> bytes:
    # Ascii85 ("base85") encoding via the standard library
    return base64.a85encode(string.encode('utf-8'))


def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode('utf-8')
if __name__ == "__main__":
import doctest
doctest.testmod()
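
# A minimal round-trip check for the helpers above (function names as reconstructed here):
#   encoded = base85_encode("base 85")
#   assert base85_decode(encoded) == "base 85"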
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'squaredcos_cap_v2']:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ['fixed_small', 'fixed_large', 'other']:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ['epsilon', 'sample', 'v_prediction']:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'sample', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
            scheduler.set_timesteps(timesteps=timesteps)
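

# Context for `batch_step_no_noise` (tested above): DDPMParallelScheduler backs the
# ParaDiGMS-style parallel samplers in diffusers, which denoise several timesteps of
# the same trajectory in one batched call instead of stepping strictly sequentially;
# the sum/mean assertions pin the deterministic reference values.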
def bfs(graph, s, t, parent):
    # Return True if there is a path from source `s` to sink `t` in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float('Inf')
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
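
# The sample network above is the classic six-node CLRS example; BFS-based augmenting
# (the Edmonds-Karp variant implemented here) finds a maximum flow of 23, so the
# script prints 23. Note that `ford_fulkerson` mutates `graph` into its residual form.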
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
A = []
if len(lowercase__ ) == 1:
return [nums.copy()]
for _ in range(len(lowercase__ ) ):
A = nums.pop(0 )
A = permute(lowercase__ )
for perm in permutations:
perm.append(lowercase__ )
result.extend(lowercase__ )
nums.append(lowercase__ )
return result
def __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
def backtrack(lowercase__ ):
if start == len(lowercase__ ) - 1:
output.append(nums[:] )
else:
for i in range(lowercase__ , len(lowercase__ ) ):
A , A = nums[i], nums[start]
backtrack(start + 1 )
A , A = nums[i], nums[start] # backtrack
A = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
__A : Optional[int] = permutea([1, 2, 3])
print(res)
doctest.testmod()
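
# Both implementations enumerate all n! orderings, only in a different order; a
# quick consistency check (illustrative only):
#   assert sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))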
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script." )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
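
# Typical invocation once registered with the accelerate CLI (the flag is defined above):
#   accelerate env --config_file path/to/config.yaml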
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    # Factory function used to instantiate the serving server from provided command line arguments.
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information"""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model"""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model"""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model"""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            'serve', help='CLI tool to run inference requests through REST and GraphQL endpoints.' )
        serve_parser.add_argument(
            '--task', type=str, choices=get_supported_tasks(), help='The task to run the pipeline on', )
        serve_parser.add_argument('--host', type=str, default='localhost', help='Interface the server will listen on.')
        serve_parser.add_argument('--port', type=int, default=8888, help='Port the serving will listen to.')
        serve_parser.add_argument('--workers', type=int, default=1, help='Number of http workers')
        serve_parser.add_argument('--model', type=str, help='Model\'s name or path to stored model.')
        serve_parser.add_argument('--config', type=str, help='Model\'s config name or path to stored model.')
        serve_parser.add_argument('--tokenizer', type=str, help='Tokenizer name to use.')
        serve_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)', )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                'Using serve command requires FastAPI and uvicorn. '
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                'Or install FastAPI and uvicorn separately.' )
        else:
            logger.info(f'Serving model over {host}:{port}')
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        '/' , self.model_info , response_model=ServeModelInfoResult , response_class=JSONResponse , methods=['GET'] , ),
                    APIRoute(
                        '/tokenize' , self.tokenize , response_model=ServeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/detokenize' , self.detokenize , response_model=ServeDeTokenizeResult , response_class=JSONResponse , methods=['POST'] , ),
                    APIRoute(
                        '/forward' , self.forward , response_model=ServeForwardResult , response_class=JSONResponse , methods=['POST'] , ),
                ] , timeout=600 , )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        # Tokenize the provided input and eventually return the corresponding token ids
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})
    def detokenize(
        self, tokens_ids: List[int] = Body(None, embed=True), skip_special_tokens: bool = Body(False, embed=True), cleanup_tokenization_spaces: bool = Body(True, embed=True), ):
        # Detokenize the provided token ids back to readable text
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model='', text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={'model': '', 'error': str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {'error': str(e)})
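

# Typical invocation once registered with the CLI (arguments are defined above):
#   transformers-cli serve --task sentiment-analysis --host 127.0.0.1 --port 8888
# The four routes exposed are GET / plus POST /tokenize, /detokenize and /forward.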
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' ),
                    '''references''': datasets.Value('''string''' ),
                } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
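
    # Example invocation (the script and file names here are placeholders):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path model.pkl --config_file config.json --pytorch_dump_path pytorch_model.bin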
"""simple docstring"""
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    # Compute WER/CER and write results to disk.
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f'WER: {wer_result}\nCER: {cer_result}'
    print(result_str)

    with open(f'{dataset_id}_eval_results.txt', "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f'log_{dataset_id}_predictions.txt'
        target_file = f'log_{dataset_id}_targets.txt'

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f'{i}' + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f'{i}' + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
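
    # Example invocation (model and dataset ids are placeholders):
    #   python eval.py --model_id <wav2vec2-checkpoint> --dataset mozilla-foundation/common_voice_7_0 \
    #       --config en --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs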
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__(self, **kwargs):
        # This __init__ only exists for legacy `no_*` arguments; it can be deleted
        # once the deprecated args are removed completely.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
                    f""" {positive_arg}={kwargs[positive_arg]}""" )

        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fp16_opt_level: str = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
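# Illustrative sketch (hypothetical values): how the encoder input size above is
# derived. With input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], two time
# features and no static/dynamic real features, _number_of_features is
# 0 + 0 + 2 + 0 + 1 * 2 = 4, so the feature size is 1 * 7 + 4 = 11.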
| 286
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments :
__lowercase = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
__lowercase = field(
default=_lowerCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__lowercase = field(
default=_lowerCamelCase , metadata={"""help""": """The column name of the images in the files."""} )
__lowercase = field(default=_lowerCamelCase , metadata={"""help""": """A folder containing the training data."""} )
__lowercase = field(default=_lowerCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
__lowercase = field(
default=0.1_5 , metadata={"""help""": """Percent to split off of train for validation."""} )
__lowercase = field(
default=_lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__lowercase = field(
default=_lowerCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__( self ):
"""simple docstring"""
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments :
__lowercase = field(
default=_lowerCamelCase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
__lowercase = field(
default=_lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
__lowercase = field(
default=_lowerCamelCase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
__lowercase = field(
default=_lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
__lowercase = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__lowercase = field(default=_lowerCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
__lowercase = field(
default=_lowerCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__lowercase = field(
default=0.7_5 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
__lowercase = field(
default=_lowerCamelCase , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class CustomTrainingArguments ( TrainingArguments ):
__lowercase = field(
default=1e-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn( examples ) -> List[Any]:
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def main() -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_snake_case = image_processor.size['shortest_edge']
else:
_snake_case = (image_processor.size['height'], image_processor.size['width'])
_snake_case = Compose(
[
Lambda(lambda __A : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__A , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples['pixel_values'] = [transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__A )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
ds["validation"].set_transform(__A )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
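        # Worked example (illustrative): with per-device batch size 64, gradient
        # accumulation 2 and world size 2, total_train_batch_size = 64 * 2 * 2 = 256,
        # so the absolute learning rate equals the base rate (scaling 256 / 256 = 1.0).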
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn( index ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
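# Illustrative invocation of the script above (arguments and paths are hypothetical):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75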
| 353
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> tuple:
    result = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
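# Worked examples (illustrative calls, using the obfuscated function name above):
#   SCREAMING_SNAKE_CASE__(voltage=0, current=2, power=4)   # -> result(name='voltage', value=2.0)
#   SCREAMING_SNAKE_CASE__(voltage=2, current=4, power=0)   # -> result(name='power', value=8.0)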
| 160
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
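# Illustrative effect of the lazy module above (assuming the usual transformers
# package layout): `from transformers.models.autoformer import AutoformerConfig`
# triggers the actual submodule import only on first attribute access.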
| 130
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger()
def convert_weight_and_push( hidden_sizes , name , config , save_directory , push_to_hub = True ):
"""simple docstring"""
print(F"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowercase__ : Union[str, Any] = timm.create_model("levit_128s" , pretrained=lowerCamelCase__ )
else:
lowercase__ : Union[str, Any] = timm.create_model("levit_128" , pretrained=lowerCamelCase__ )
if hidden_sizes == 192:
lowercase__ : Dict = timm.create_model("levit_192" , pretrained=lowerCamelCase__ )
if hidden_sizes == 256:
lowercase__ : Optional[Any] = timm.create_model("levit_256" , pretrained=lowerCamelCase__ )
if hidden_sizes == 384:
lowercase__ : List[str] = timm.create_model("levit_384" , pretrained=lowerCamelCase__ )
from_model.eval()
lowercase__ : Union[str, Any] = LevitForImageClassificationWithTeacher(lowerCamelCase__ ).eval()
lowercase__ : Tuple = OrderedDict()
lowercase__ : Dict = from_model.state_dict()
lowercase__ : Union[str, Any] = list(from_model.state_dict().keys() )
lowercase__ : Any = list(our_model.state_dict().keys() )
print(len(lowerCamelCase__ ) , len(lowerCamelCase__ ) )
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Union[str, Any] = weights[og_keys[i]]
our_model.load_state_dict(lowerCamelCase__ )
lowercase__ : List[str] = torch.randn((2, 3, 224, 224) )
lowercase__ : Optional[Any] = from_model(lowerCamelCase__ )
lowercase__ : Optional[Any] = our_model(lowerCamelCase__ ).logits
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ ), "The model logits don't match the original one."
    checkpoint_name = name
print(lowerCamelCase__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase__ : Union[str, Any] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory , model_name = None , push_to_hub = True ):
"""simple docstring"""
lowercase__ : Optional[Any] = "imagenet-1k-id2label.json"
lowercase__ : str = 1_000
lowercase__ : Any = (1, num_labels)
lowercase__ : Optional[Any] = "huggingface/label-files"
lowercase__ : Optional[Any] = num_labels
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : Dict = idalabel
lowercase__ : str = {v: k for k, v in idalabel.items()}
lowercase__ : Optional[Any] = partial(lowerCamelCase__ , num_labels=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ )
    names_to_hidden_sizes = {
"levit-128S": 128,
"levit-128": 128,
"levit-192": 192,
"levit-256": 256,
"levit-384": 384,
}
    names_to_config = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
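# Illustrative invocation (script and folder names are hypothetical):
#   python convert_levit_checkpoint.py --model_name levit-128S \
#       --pytorch_dump_folder_path ./levit-dump-folder --no-push_to_hub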
| 130
| 1
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ = 'vision-encoder-decoder'
lowerCAmelCase__ = True
    def __init__( self , **kwargs ) -> List[str]:
        '''simple docstring'''
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F'A configuration of type {self.model_type} cannot be instantiated because '
                F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
        encoder_config = kwargs.pop("encoder" )
        encoder_model_type = encoder_config.pop("model_type" )
        decoder_config = kwargs.pop("decoder" )
        decoder_model_type = decoder_config.pop("model_type" )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> PretrainedConfig:
'''simple docstring'''
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
lowercase_ = True
lowercase_ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = copy.deepcopy(self.__dict__ )
lowercase_ = self.encoder.to_dict()
lowercase_ = self.decoder.to_dict()
lowercase_ = self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCAmelCase__ = version.parse("1.11" )
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 1e-4
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class VisionEncoderDecoderDecoderOnnxConfig( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def A__ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowercase_ = OrderedDict()
lowercase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
lowercase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
lowercase_ = {0: "batch", 1: "encoder_sequence"}
return common_inputs
def A__ ( self , UpperCAmelCase , UpperCAmelCase = -1 , UpperCAmelCase = -1 , UpperCAmelCase = False , UpperCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
import torch
lowercase_ = OrderedDict()
lowercase_ = super().generate_dummy_inputs(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
lowercase_ = dummy_input["input_ids"].shape
lowercase_ = (batch, encoder_sequence, self._config.encoder_hidden_size)
lowercase_ = dummy_input.pop("input_ids" )
lowercase_ = dummy_input.pop("attention_mask" )
lowercase_ = torch.zeros(_SCREAMING_SNAKE_CASE )
return common_inputs
class __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
@property
def A__ ( self ) -> None:
'''simple docstring'''
pass
def A__ ( self , UpperCAmelCase ) -> OnnxConfig:
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(_SCREAMING_SNAKE_CASE )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "default" ) -> OnnxConfig:
'''simple docstring'''
lowercase_ = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
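# Axis-naming sketch for the ONNX configs above (illustrative): the encoder declares
# dynamic axes (batch, num_channels, height, width) for pixel_values and
# (batch, encoder_sequence) for last_hidden_state; the decoder additionally marks
# "past_decoder_sequence + sequence" for its input ids and attention mask.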
| 364
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.txt"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""",
"""facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/esm2_t6_8M_UR50D""": 1_0_2_4,
"""facebook/esm2_t12_35M_UR50D""": 1_0_2_4,
}
def load_vocab_file( __lowerCamelCase: Any ):
    '''simple docstring'''
    with open(__lowerCamelCase , "r" ) as f:
        lines = f.read().splitlines()
return [l.strip() for l in lines]
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
    def _tokenize( self , text , **kwargs ) -> Optional[Any]:
'''simple docstring'''
return text.split()
    def get_vocab_size( self , UpperCAmelCase=False ) -> int:
'''simple docstring'''
return len(self._id_to_token )
def A__ ( self ) -> Tuple:
'''simple docstring'''
return {token: i for i, token in enumerate(self.all_tokens )}
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self._token_to_id.get(UpperCAmelCase , self._token_to_id.get(self.unk_token ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self._id_to_token.get(UpperCAmelCase , self.unk_token )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        mask = [1] + ([0] * len(UpperCAmelCase )) + [1]
if token_ids_a is not None:
mask += [0] * len(UpperCAmelCase ) + [1]
return mask
def A__ ( self , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
        vocab_file = os.path.join(UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
with open(UpperCAmelCase , "w" ) as f:
f.write("\n".join(self.all_tokens ) )
return (vocab_file,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.get_vocab_size(with_added_tokens=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = False ) -> int:
'''simple docstring'''
return super()._add_tokens(UpperCAmelCase , special_tokens=UpperCAmelCase )
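# Token layout produced by the special-token logic above (illustrative):
#   single sequence:   <cls> tokens <eos>
#   paired sequences:  <cls> tokens_a <eos> tokens_b <eos>   (ESM has no <sep> token)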
| 297
| 0
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = FlaxAutoencoderKL
@property
    def dummy_input( self : str ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key ,((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}
    def prepare_init_args_and_inputs_for_common( self : Tuple ):
        init_dict = {
'block_out_channels': [32, 64],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
| 89
|
"""simple docstring"""
def odd_even_sort( input_list ):
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
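# Worked example (illustrative): odd_even_sort([5, 3, 4, 1])
#   even pass: [3, 5, 1, 4] -> odd pass: [3, 1, 5, 4]
#   even pass: [1, 3, 4, 5] -> odd pass: no swaps -> sorted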
| 57
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
lowercase = 42
lowercase = jnp.floataa
    def setup( self ):
        self.conv = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self : Optional[Any] , hidden_states : List[str] ):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
        hidden_states = self.conv(hidden_states )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
lowercase = 42
lowercase = jnp.floataa
    def setup( self ):
        self.conv = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self : Dict , hidden_states : Optional[Any] ):
        hidden_states = self.conv(hidden_states )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
"""simple docstring"""
lowercase = 42
lowercase = None
lowercase = 0.0
lowercase = None
lowercase = jnp.floataa
def lowerCamelCase ( self : int ):
snake_case__ : Tuple = self.in_channels if self.out_channels is None else self.out_channels
snake_case__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
snake_case__ : str = nn.Conv(
_snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case__ : Any = nn.Dense(_snake_case , dtype=self.dtype )
snake_case__ : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
snake_case__ : Optional[Any] = nn.Dropout(self.dropout_prob )
snake_case__ : int = nn.Conv(
_snake_case , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
snake_case__ : Dict = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
snake_case__ : str = None
if use_nin_shortcut:
snake_case__ : Optional[Any] = nn.Conv(
_snake_case , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int]=True ):
snake_case__ : List[Any] = hidden_states
snake_case__ : Tuple = self.norma(_snake_case )
snake_case__ : str = nn.swish(_snake_case )
snake_case__ : Union[str, Any] = self.conva(_snake_case )
snake_case__ : List[Any] = self.time_emb_proj(nn.swish(_snake_case ) )
snake_case__ : Optional[int] = jnp.expand_dims(jnp.expand_dims(_snake_case , 1 ) , 1 )
snake_case__ : Tuple = hidden_states + temb
snake_case__ : Optional[Any] = self.norma(_snake_case )
snake_case__ : int = nn.swish(_snake_case )
snake_case__ : Tuple = self.dropout(_snake_case , _snake_case )
snake_case__ : Tuple = self.conva(_snake_case )
if self.conv_shortcut is not None:
snake_case__ : Tuple = self.conv_shortcut(_snake_case )
return hidden_states + residual
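# Shape sketch for the blocks above (illustrative, NHWC): the upsampler resizes
# (B, H, W, C) to (B, 2H, 2W, C) with nearest-neighbor interpolation before its
# 3x3 convolution; the downsampler halves H and W via a stride-2 convolution.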
| 361
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__a = logging.getLogger(__name__)
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : str , snake_case_ : Tuple=None ):
super().__init__(
snake_case_ , question_encoder_tokenizer=snake_case_ , generator_tokenizer=snake_case_ , index=snake_case_ , init_retrieval=snake_case_ , )
snake_case__ : int = None
    def init_retrieval( self : int , distributed_port : int ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
snake_case__ : Optional[Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
snake_case__ : int = str(distributed_port + 1 )
snake_case__ : List[str] = dist.new_group(ranks=snake_case_ , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main( self : Optional[Any] ):
return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self : int , scatter_list : str , target_shape : int , target_type : int=torch.floataa ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self : Union[str, Any] ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("""e""" )) , None )
return ifname
    def retrieve( self : Tuple , question_hidden_states : np.ndarray , n_docs : int ):
        # single GPU training
        if not dist.is_initialized():
            retrieved_doc_embeds , doc_ids = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
# distributed training
        world_size = dist.get_world_size(group=self.process_group )
# gather logic
snake_case__ : str = None
if self._is_main():
snake_case__ : Any = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(snake_case_ )]
dist.gather(torch.tensor(snake_case_ ) , dst=0 , gather_list=snake_case_ , group=self.process_group )
# scatter logic
        n_queries = question_hidden_states.shape[0]
snake_case__ : List[str] = []
snake_case__ : Dict = []
if self._is_main():
assert len(snake_case_ ) == world_size
snake_case__ , snake_case__ : Union[str, Any] = self._main_retrieve(torch.cat(snake_case_ ).numpy() , snake_case_ )
snake_case__ , snake_case__ : Dict = torch.tensor(snake_case_ ), torch.tensor(snake_case_ )
snake_case__ : Union[str, Any] = self._chunk_tensor(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self._chunk_tensor(snake_case_ , snake_case_ )
snake_case__ : Union[str, Any] = self._scattered(snake_case_ , [n_queries, n_docs] , target_type=torch.intaa )
snake_case__ : Dict = self._scattered(snake_case_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(snake_case_ )
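# Flow sketch for the distributed retrieval above (illustrative): non-main ranks
# send their question embeddings to rank 0 via dist.gather; rank 0 queries the
# index once for the whole batch, then dist.scatter distributes per-rank chunks
# of doc ids and doc embeddings back over the gloo process group.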
| 43
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def get_deta_config( model_name ):
    backbone_config = SwinConfig(
        embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
    config = DetaConfig(
        backbone_config=backbone_config , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=True , with_box_refine=True , two_stage=True , )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__UpperCamelCase : Optional[Any] = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
__UpperCamelCase : Optional[int] = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : Any = in_proj_weight[:dim, :]
__UpperCamelCase : Optional[Any] = in_proj_bias[: dim]
__UpperCamelCase : Tuple = in_proj_weight[
dim : dim * 2, :
]
__UpperCamelCase : List[Any] = in_proj_bias[
dim : dim * 2
]
__UpperCamelCase : List[Any] = in_proj_weight[
-dim :, :
]
__UpperCamelCase : Any = in_proj_bias[-dim :]
# fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCamelCase : int = in_proj_weight[:hidden_size, :]
__UpperCamelCase : Union[str, Any] = in_proj_bias[:hidden_size]
__UpperCamelCase : Union[str, Any] = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__UpperCamelCase : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
__UpperCamelCase : Tuple = in_proj_weight[-hidden_size:, :]
__UpperCamelCase : Tuple = in_proj_bias[-hidden_size:]
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_deta_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    config = get_deta_config(model_name )
# load original state dict
if model_name == "deta-swin-large":
__UpperCamelCase : Optional[int] = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
__UpperCamelCase : int = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F"Model name {model_name} not supported" )
__UpperCamelCase : Optional[int] = torch.load(snake_case__ , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(snake_case__ , param.shape )
# rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__UpperCamelCase : Union[str, Any] = state_dict.pop(snake_case__ )
__UpperCamelCase : List[str] = val
if "input_proj" in key:
__UpperCamelCase : Any = state_dict.pop(snake_case__ )
__UpperCamelCase : Optional[Any] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__UpperCamelCase : List[Any] = state_dict.pop(snake_case__ )
__UpperCamelCase : Tuple = val
# finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device )
# load image processor
    processor = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__UpperCamelCase : Any = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__UpperCamelCase : Optional[int] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__UpperCamelCase : Tuple = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__UpperCamelCase : str = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__ ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__ ) , atol=1E-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F"jozhang97/{model_name}" )
processor.push_to_hub(F"jozhang97/{model_name}" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowerCAmelCase = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
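# Illustrative invocation (script name and output path are assumptions):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub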
| 298
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = '''Hello, World!'''
_lowerCAmelCase = '''en_XX'''
def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    data_dir = Path("data_bin" )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , snake_case__ )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_tokens.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.embed_positions.weight
__UpperCamelCase : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__UpperCamelCase : Any = xmod_sent_encoder.layernorm_embedding.weight
__UpperCamelCase : str = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError("Dimensions of intermediate weights do not match." )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
__UpperCamelCase : List[Any] = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError("Dimensions of feed-forward weights do not match." )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
__UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
__UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
__UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__UpperCamelCase : Any = xmod.encode(snake_case__ ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(snake_case__ )
__UpperCamelCase : Optional[Any] = model(snake_case__ )[0]
if classification_head:
__UpperCamelCase : int = xmod.model.classification_heads["mnli"](xmod.extract_features(snake_case__ ) )
else:
__UpperCamelCase : Optional[Any] = xmod.model(snake_case__ , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__UpperCamelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__UpperCamelCase : Union[str, Any] = torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(snake_case__ ).mkdir(parents=snake_case__ , exist_ok=snake_case__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
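The core of the script above is the numerical-equivalence check: report the worst-case deviation, then gate on torch.allclose. A minimal standalone sketch of that pattern, reusable for any conversion script; the 1e-3 tolerance mirrors the value used above.

import torch

def assert_outputs_match(ours: torch.Tensor, theirs: torch.Tensor, atol: float = 1e-3) -> None:
    # Report the worst-case elementwise deviation, then fail loudly if it exceeds the tolerance.
    max_abs_diff = torch.max(torch.abs(ours - theirs)).item()
    print(f"max_absolute_diff = {max_abs_diff}")
    if not torch.allclose(ours, theirs, atol=atol):
        raise Exception("Something went wRoNg")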
| 298
| 1
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule( FlaxBigBirdForQuestionAnsweringModule ):
__magic_name__ = 42
__magic_name__ = jnp.floataa
__magic_name__ = True
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().setup()
a : str = nn.Dense(5 , dtype=self.dtype )
def __call__( self : Any , *A : Any , **A : Optional[int] ):
'''simple docstring'''
a : List[str] = super().__call__(*A , **A )
a : Optional[Any] = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions( FlaxBigBirdForQuestionAnswering ):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def snake_case (A_ :Any , A_ :Any , A_ :Dict , A_ :int , A_ :Optional[Any] , A_ :List[str] ):
'''simple docstring'''
def cross_entropy(A_ :List[str] , A_ :Optional[Any] , A_ :List[str]=None ):
a : Optional[Any] = logits.shape[-1]
a : List[str] = (labels[..., None] == jnp.arange(A_ )[None]).astype('f4' )
a : List[Any] = jax.nn.log_softmax(A_ , axis=-1 )
a : Union[str, Any] = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
a : Tuple = reduction(A_ )
return loss
a : List[Any] = partial(A_ , reduction=jnp.mean )
a : str = cross_entropy(A_ , A_ )
a : List[Any] = cross_entropy(A_ , A_ )
a : List[str] = cross_entropy(A_ , A_ )
return (start_loss + end_loss + pooled_loss) / 3
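As a sanity check, the one-hot cross-entropy defined above should agree with optax's built-in. A small self-contained sketch (jax and optax are already imported at the top of this file):

import jax
import jax.numpy as jnp
import optax

logits = jnp.array([[2.0, 0.5, -1.0]])
labels = jnp.array([0])
onehot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
manual = -jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
reference = optax.softmax_cross_entropy(logits, onehot)
assert jnp.allclose(manual, reference)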
@dataclass
class snake_case :
__magic_name__ = '''google/bigbird-roberta-base'''
__magic_name__ = 3000
__magic_name__ = 1_0500
__magic_name__ = 128
__magic_name__ = 3
__magic_name__ = 1
__magic_name__ = 5
# tx_args
__magic_name__ = 3E-5
__magic_name__ = 0.0
__magic_name__ = 2_0000
__magic_name__ = 0.0095
__magic_name__ = '''bigbird-roberta-natural-questions'''
__magic_name__ = '''training-expt'''
__magic_name__ = '''data/nq-training.jsonl'''
__magic_name__ = '''data/nq-validation.jsonl'''
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=A )
a : Dict = os.path.join(self.base_dir , self.save_dir )
a : Optional[int] = self.batch_size_per_device * jax.device_count()
@dataclass
class snake_case :
__magic_name__ = 42
__magic_name__ = 4096 # no dynamic padding on TPUs
def __call__( self : List[Any] , A : Any ):
'''simple docstring'''
a : List[Any] = self.collate_fn(A )
a : Dict = jax.tree_util.tree_map(A , A )
return batch
def lowerCamelCase__ ( self : Optional[int] , A : Optional[int] ):
'''simple docstring'''
a : Dict = self.fetch_inputs(features['input_ids'] )
a : str = {
'input_ids': jnp.array(A , dtype=jnp.intaa ),
'attention_mask': jnp.array(A , dtype=jnp.intaa ),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa ),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa ),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa ),
}
return batch
def lowerCamelCase__ ( self : int , A : list ):
'''simple docstring'''
a : Tuple = [self._fetch_inputs(A ) for ids in input_ids]
return zip(*A )
def lowerCamelCase__ ( self : int , A : list ):
'''simple docstring'''
a : Dict = [1 for _ in range(len(A ) )]
while len(A ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def snake_case (A_ :List[Any] , A_ :Dict , A_ :Optional[Any]=None ):
'''simple docstring'''
if seed is not None:
a : Any = dataset.shuffle(seed=A_ )
for i in range(len(A_ ) // batch_size ):
a : Optional[Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(A_ )
@partial(jax.pmap , axis_name='batch' )
def snake_case (A_ :int , A_ :int , **A_ :Any ):
'''simple docstring'''
def loss_fn(A_ :Optional[Any] ):
a : Tuple = model_inputs.pop('start_labels' )
a : Tuple = model_inputs.pop('end_labels' )
a : Tuple = model_inputs.pop('pooled_labels' )
a : List[Any] = state.apply_fn(**A_ , params=A_ , dropout_rng=A_ , train=A_ )
a : Union[str, Any] = outputs
return state.loss_fn(
A_ , A_ , A_ , A_ , A_ , A_ , )
a : Any = jax.random.split(A_ )
a : int = jax.value_and_grad(A_ )
a : List[Any] = grad_fn(state.params )
a : Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
a : Optional[int] = jax.lax.pmean(A_ , 'batch' )
a : Union[str, Any] = state.apply_gradients(grads=A_ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def snake_case (A_ :List[str] , **A_ :str ):
'''simple docstring'''
a : Tuple = model_inputs.pop('start_labels' )
a : List[Any] = model_inputs.pop('end_labels' )
a : Dict = model_inputs.pop('pooled_labels' )
a : Dict = state.apply_fn(**A_ , params=state.params , train=A_ )
a : Any = outputs
a : Dict = state.loss_fn(A_ , A_ , A_ , A_ , A_ , A_ )
a : Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class snake_case ( train_state.TrainState ):
__magic_name__ = struct.field(pytree_node=UpperCAmelCase )
@dataclass
class snake_case :
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = None
def lowerCamelCase__ ( self : Any , A : Optional[Any] , A : Optional[Any] , A : Tuple , A : Optional[Any]=None ):
'''simple docstring'''
a : Union[str, Any] = model.params
a : Tuple = TrainState.create(
apply_fn=model.__call__ , params=A , tx=A , loss_fn=A , )
if ckpt_dir is not None:
a : Tuple = restore_checkpoint(A , A )
a : Union[str, Any] = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
a : Any = build_tx(**A )
a : Any = train_state.TrainState(
step=A , apply_fn=model.__call__ , params=A , tx=A , opt_state=A , )
a : Tuple = args
a : List[str] = data_collator
a : Optional[Any] = lr
a : str = params
a : Tuple = jax_utils.replicate(A )
return state
def lowerCamelCase__ ( self : List[str] , A : List[str] , A : Dict , A : Union[str, Any] ):
'''simple docstring'''
a : int = self.args
a : Optional[Any] = len(A ) // args.batch_size
a : Optional[Any] = jax.random.PRNGKey(0 )
a : Optional[int] = jax.random.split(A , jax.device_count() )
for epoch in range(args.max_epochs ):
a : Optional[int] = jnp.array(0 , dtype=jnp.floataa )
a : Optional[int] = get_batched_dataset(A , args.batch_size , seed=A )
a : Any = 0
for batch in tqdm(A , total=A , desc=F'''Running EPOCH-{epoch}''' ):
a : Any = self.data_collator(A )
a : Tuple = self.train_step_fn(A , A , **A )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
if i % args.logging_steps == 0:
a : Optional[Any] = jax_utils.unreplicate(state.step )
a : Tuple = running_loss.item() / i
a : Optional[int] = self.scheduler_fn(state_step - 1 )
a : Optional[Any] = self.evaluate(A , A )
a : List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(A ) )
self.logger.log(A , commit=A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F'''-e{epoch}-s{i}''' , state=A )
def lowerCamelCase__ ( self : List[Any] , A : Optional[int] , A : Dict ):
'''simple docstring'''
a : Any = get_batched_dataset(A , self.args.batch_size )
a : Dict = len(A ) // self.args.batch_size
a : Union[str, Any] = jnp.array(0 , dtype=jnp.floataa )
a : str = 0
for batch in tqdm(A , total=A , desc='Evaluating ... ' ):
a : Optional[int] = self.data_collator(A )
a : List[str] = self.val_step_fn(A , **A )
running_loss += jax_utils.unreplicate(metrics['loss'] )
i += 1
return running_loss / i
def lowerCamelCase__ ( self : int , A : int , A : Optional[Any] ):
'''simple docstring'''
a : List[Any] = jax_utils.unreplicate(A )
print(F'''SAVING CHECKPOINT IN {save_dir}''' , end=' ... ' )
self.model_save_fn(A , params=state.params )
with open(os.path.join(A , 'opt_state.msgpack' ) , 'wb' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(A , 'args.joblib' ) )
joblib.dump(self.data_collator , os.path.join(A , 'data_collator.joblib' ) )
with open(os.path.join(A , 'training_state.json' ) , 'w' ) as f:
json.dump({'step': state.step.item()} , A )
print('DONE' )
def snake_case (A_ :List[Any] , A_ :Optional[int] ):
'''simple docstring'''
print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=' ... ' )
with open(os.path.join(A_ , 'flax_model.msgpack' ) , 'rb' ) as f:
a : Union[str, Any] = from_bytes(state.params , f.read() )
with open(os.path.join(A_ , 'opt_state.msgpack' ) , 'rb' ) as f:
a : List[Any] = from_bytes(state.opt_state , f.read() )
a : Optional[int] = joblib.load(os.path.join(A_ , 'args.joblib' ) )
a : List[Any] = joblib.load(os.path.join(A_ , 'data_collator.joblib' ) )
with open(os.path.join(A_ , 'training_state.json' ) , 'r' ) as f:
a : List[Any] = json.load(A_ )
a : int = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def snake_case (A_ :Union[str, Any] , A_ :Optional[int] , A_ :int , A_ :Union[str, Any] ):
'''simple docstring'''
a : Any = num_train_steps - warmup_steps
a : Optional[Any] = optax.linear_schedule(init_value=A_ , end_value=A_ , transition_steps=A_ )
a : List[Any] = optax.linear_schedule(init_value=A_ , end_value=1E-7 , transition_steps=A_ )
a : Optional[int] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def snake_case (A_ :str , A_ :int , A_ :List[str] , A_ :Union[str, Any] , A_ :str ):
'''simple docstring'''
def weight_decay_mask(A_ :Optional[Any] ):
a : Union[str, Any] = traverse_util.flatten_dict(A_ )
a : Optional[int] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(A_ )
a : Any = scheduler_fn(A_ , A_ , A_ , A_ )
a : int = optax.adamw(learning_rate=A_ , weight_decay=A_ , mask=A_ )
return tx, lr
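The scheduler assembled above is a linear warmup joined to a linear decay at the warmup boundary. A quick way to see the shape; the step counts and rates below are illustrative, not the training defaults.

import optax

lr_fn = optax.join_schedules(
    schedules=[
        optax.linear_schedule(init_value=0.0, end_value=3e-5, transition_steps=2_000),
        optax.linear_schedule(init_value=3e-5, end_value=1e-7, transition_steps=18_000),
    ],
    boundaries=[2_000],
)
print(lr_fn(0), lr_fn(2_000), lr_fn(20_000))  # ramps 0 -> 3e-5, then decays toward 1e-7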
| 360
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 186
| 0
|
from __future__ import annotations
import time
import numpy as np
_lowerCAmelCase : Optional[Any] = [8, 5, 9, 7]
_lowerCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_lowerCAmelCase : Tuple = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    """Checks whether a set of processes can run to completion without deadlock."""

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        # Total of each resource currently allocated across all processes.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        # Resources still free: the claim vector minus everything already allocated.
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        # Remaining demand of each process: maximum claim minus current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
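For readers who want the safety test without the printing machinery, here is a compact functional sketch of the same check; the function and variable names are mine, not part of the class above.

import numpy as np

def is_safe_state(claim_vector, alloc, max_claim) -> bool:
    # need[p] = what process p may still request; available = free resources right now.
    need = [list(np.array(m) - np.array(a)) for m, a in zip(max_claim, alloc)]
    available = np.array(claim_vector) - np.array(alloc).sum(axis=0)
    pending = list(range(len(alloc)))
    while pending:
        for p in list(pending):
            if all(n <= avail for n, avail in zip(need[p], available)):
                available = available + np.array(alloc[p])  # process finishes, frees resources
                pending.remove(p)
                break
        else:
            return False  # no runnable process left -> unsafe state
    return True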
| 300
|
"""simple docstring"""
g = 9.80665
def archimedes_principle(fluid_density: float , volume: float , gravity: float = g) -> float:
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''')
if volume < 0:
raise ValueError('''Impossible Object volume''')
if gravity <= 0:
raise ValueError('''Impossible Gravity''')
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
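A worked example, using the signature fixed above: half a cubic metre of displaced water gives F = rho * g * V = 1000 * 9.80665 * 0.5, about 4903.3 N.

print(archimedes_principle(fluid_density=1_000, volume=0.5))  # ~4903.325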
| 160
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-canny''' , from_pt=lowerCamelCase_ , dtype=jnp.bfloataa )
lowerCAmelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCamelCase_ , from_pt=lowerCamelCase_ , dtype=jnp.bfloataa )
lowerCAmelCase__ = controlnet_params
lowerCAmelCase__ = 'bird'
lowerCAmelCase__ = jax.device_count()
lowerCAmelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
lowerCAmelCase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCAmelCase__ = jax.random.PRNGKey(0 )
lowerCAmelCase__ = jax.random.split(lowerCamelCase_ , jax.device_count() )
lowerCAmelCase__ = replicate(lowerCamelCase_ )
lowerCAmelCase__ = shard(lowerCamelCase_ )
lowerCAmelCase__ = shard(lowerCamelCase_ )
lowerCAmelCase__ = pipe(
prompt_ids=lowerCamelCase_ , image=lowerCamelCase_ , params=lowerCamelCase_ , prng_seed=lowerCamelCase_ , num_inference_steps=50 , jit=lowerCamelCase_ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCAmelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = FlaxControlNetModel.from_pretrained(
'''lllyasviel/sd-controlnet-openpose''' , from_pt=lowerCamelCase_ , dtype=jnp.bfloataa )
lowerCAmelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , controlnet=lowerCamelCase_ , from_pt=lowerCamelCase_ , dtype=jnp.bfloataa )
lowerCAmelCase__ = controlnet_params
lowerCAmelCase__ = 'Chef in the kitchen'
lowerCAmelCase__ = jax.device_count()
lowerCAmelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
lowerCAmelCase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCAmelCase__ = jax.random.PRNGKey(0 )
lowerCAmelCase__ = jax.random.split(lowerCamelCase_ , jax.device_count() )
lowerCAmelCase__ = replicate(lowerCamelCase_ )
lowerCAmelCase__ = shard(lowerCamelCase_ )
lowerCAmelCase__ = shard(lowerCamelCase_ )
lowerCAmelCase__ = pipe(
prompt_ids=lowerCamelCase_ , image=lowerCamelCase_ , params=lowerCamelCase_ , prng_seed=lowerCamelCase_ , num_inference_steps=50 , jit=lowerCamelCase_ , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
lowerCAmelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ = images[0, 2_53:2_56, 2_53:2_56, -1]
lowerCAmelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
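The replicate/shard pair used in both tests is the standard Flax idiom for data parallelism: parameters are copied to every device, while batch arrays get a leading device axis. A minimal sketch of what shard does to one array (flax's own implementation is equivalent in spirit, applied over a whole pytree):

import jax
import jax.numpy as jnp

def shard_sketch(x: jnp.ndarray) -> jnp.ndarray:
    n = jax.local_device_count()
    return x.reshape((n, -1) + x.shape[1:])  # leading axis becomes the device axis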
| 362
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
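A worked example with a strictly diagonally dominant system, using the function name from the fix above; with more iterations the iterates keep converging toward the exact solution.

import numpy as np

coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
constant = np.array([[2], [-6], [-4]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))
# -> approximately [0.909375, -1.14375, -0.7484375]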
| 228
| 0
|
import os
from math import log10
def lowercase__ ( __snake_case : str = "base_exp.txt" ):
'''simple docstring'''
UpperCAmelCase_ : float = 0
UpperCAmelCase_ : Tuple = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) ):
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = list(map(__snake_case , line.split(',' ) ) )
if x * logaa(__snake_case ) > largest:
UpperCAmelCase_ : Union[str, Any] = x * logaa(__snake_case )
UpperCAmelCase_ : Dict = i + 1
return result
if __name__ == "__main__":
print(solution())
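The trick in this Project Euler solution is that a^x > b^y exactly when x * log10(a) > y * log10(b), so the huge powers never need to be materialised. For example:

from math import log10

# 2^1000 vs 3^600: compare exponent-times-log instead of 302- and 287-digit integers.
print(1_000 * log10(2) > 600 * log10(3))  # True, so 2^1000 is the larger number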
| 29
|
'''simple docstring'''
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance: float , capacitance: float ) -> tuple[str, float]:
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
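Worked example, using the signature fixed above: with L = 10 H and C = 5 F, f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi * sqrt(50)), about 0.0225 Hz.

print(resonant_frequency(inductance=10, capacitance=5))
# -> ('Resonant frequency', 0.022507907903927652)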
| 297
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
SCREAMING_SNAKE_CASE : Optional[Any] = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """simple docstring"""
        any_type: Any | T = None
        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        """simple docstring"""
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """simple docstring"""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """simple docstring"""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        # Compare every range query against a brute-force reduce over the array slice.
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
SCREAMING_SNAKE_CASE : Optional[int] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
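Direct usage is simple once the tree is built; range queries and point updates both run in O(log n):

st = SegmentTree([5, 2, 8], min)
print(st.query(0, 2))  # -> 2
st.update(1, 9)
print(st.query(0, 2))  # -> 5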
| 84
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def UpperCamelCase_( lowerCamelCase_ ) -> List[str]:
_lowercase : Union[str, Any] = EfficientNetConfig()
_lowercase : Any = CONFIG_MAP[model_name]['hidden_dim']
_lowercase : Any = CONFIG_MAP[model_name]['width_coef']
_lowercase : Optional[int] = CONFIG_MAP[model_name]['depth_coef']
_lowercase : List[Any] = CONFIG_MAP[model_name]['image_size']
_lowercase : Tuple = CONFIG_MAP[model_name]['dropout_rate']
_lowercase : Dict = CONFIG_MAP[model_name]['dw_padding']
_lowercase : str = 'huggingface/label-files'
_lowercase : Optional[Any] = 'imagenet-1k-id2label.json'
_lowercase : List[Any] = 1000
_lowercase : str = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
_lowercase : Optional[int] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
_lowercase : int = idalabel
_lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase_( ) -> List[Any]:
_lowercase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return im
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
_lowercase : Tuple = CONFIG_MAP[model_name]['image_size']
_lowercase : List[str] = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowerCamelCase_ , )
return preprocessor
def UpperCamelCase_( lowerCamelCase_ ) -> Union[str, Any]:
_lowercase : Tuple = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
_lowercase : Tuple = sorted(set(lowerCamelCase_ ) )
_lowercase : List[Any] = len(lowerCamelCase_ )
_lowercase : List[str] = {b: str(lowerCamelCase_ ) for b, i in zip(lowerCamelCase_ , range(lowerCamelCase_ ) )}
_lowercase : Optional[int] = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
_lowercase : Union[str, Any] = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
_lowercase : Optional[Any] = {}
for item in rename_keys:
if item[0] in original_param_names:
_lowercase : str = 'efficientnet.' + item[1]
_lowercase : Optional[Any] = 'classifier.weight'
_lowercase : List[str] = 'classifier.bias'
return key_mapping
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
for key, value in tf_params.items():
if "normalization" in key:
continue
_lowercase : Any = key_mapping[key]
if "_conv" in key and "kernel" in key:
_lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
_lowercase : Dict = torch.from_numpy(lowerCamelCase_ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
_lowercase : Tuple = torch.from_numpy(np.transpose(lowerCamelCase_ ) )
else:
_lowercase : List[str] = torch.from_numpy(lowerCamelCase_ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowerCamelCase_ )
@torch.no_grad()
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
_lowercase : Any = model_classes[model_name](
include_top=lowerCamelCase_ , weights='imagenet' , input_tensor=lowerCamelCase_ , input_shape=lowerCamelCase_ , pooling=lowerCamelCase_ , classes=1000 , classifier_activation='softmax' , )
_lowercase : int = original_model.trainable_variables
_lowercase : Dict = original_model.non_trainable_variables
_lowercase : Optional[Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_lowercase : int = param.numpy()
_lowercase : int = list(tf_params.keys() )
# Load HuggingFace model
_lowercase : int = get_efficientnet_config(lowerCamelCase_ )
_lowercase : List[str] = EfficientNetForImageClassification(lowerCamelCase_ ).eval()
_lowercase : str = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
_lowercase : Optional[int] = rename_keys(lowerCamelCase_ )
replace_params(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Initialize preprocessor and preprocess input image
_lowercase : Optional[Any] = convert_image_processor(lowerCamelCase_ )
_lowercase : Optional[Any] = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
_lowercase : Any = hf_model(**lowerCamelCase_ )
_lowercase : Optional[int] = outputs.logits.detach().numpy()
# Original model inference
_lowercase : List[Any] = False
_lowercase : List[Any] = CONFIG_MAP[model_name]['image_size']
_lowercase : int = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_lowercase : Optional[Any] = image.img_to_array(lowerCamelCase_ )
_lowercase : Any = np.expand_dims(lowerCamelCase_ , axis=0 )
_lowercase : Optional[int] = original_model.predict(lowerCamelCase_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(lowerCamelCase_ ):
os.mkdir(lowerCamelCase_ )
# Save converted model and image processor
hf_model.save_pretrained(lowerCamelCase_ )
preprocessor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
_lowercase : str = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowerCamelCase_ )
hf_model.push_to_hub(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
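After a successful conversion with --push_to_hub, the checkpoint can be consumed like any other HF image classifier. A hedged sketch; the google/efficientnet-b0 repo id is an assumption about where the canonical conversion lives, substitute your own if you pushed elsewhere.

import requests
from PIL import Image
from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # same test image as above
image = Image.open(requests.get(url, stream=True).raw)
preprocessor = EfficientNetImageProcessor.from_pretrained("google/efficientnet-b0")  # assumed repo id
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b0")
inputs = preprocessor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])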
| 84
| 1
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 1_00,
) -> float:
    """simple docstring"""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x: int | float) -> float:
        """simple docstring"""
        return math.sin(10 * x)
print('f(x) = sin(10 * x)')
print('The length of the curve from x = -10 to x = 10 is:')
UpperCAmelCase_ : List[str] = 10
while i <= 10_0000:
print(F'With {i} steps: {line_length(f, -10, 10, i)}')
i *= 10
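A quick correctness check for the fixed function above: the "curve" f(x) = x from 0 to 1 is a straight segment of length sqrt(2), and the piecewise-linear approximation is exact for any step count.

print(line_length(lambda x: x, 0, 1, 10))  # ~1.41421356 (= sqrt(2))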
| 32
|
import math
import qiskit
def quantum_full_adder( input_1: int = 1 , input_2: int = 1 , carry_in: int = 1 ):
    '''simple docstring'''
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(F'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
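For deterministic inputs the measurement histogram collapses to a single bitstring: 1 + 1 + 1 = 3 = 0b11, i.e. sum bit 1 with carry-out 1, so all 1,000 shots should report '11'.

counts = quantum_full_adder(1, 1, 1)
print(counts)  # expected: {'11': 1000}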
| 43
| 0
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase__ : Dict =yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
lowerCAmelCase__ : Optional[int] ={
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
lowerCAmelCase__ : str ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Optional[int] ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Dict ={
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
lowerCAmelCase__ : Union[str, Any] ='\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : List[str] =(
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
lowerCAmelCase__ : List[str] ='\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Tuple =(
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
lowerCAmelCase__ : str ='\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Tuple ='The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
lowerCAmelCase__ : str ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : int ='The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
lowerCAmelCase__ : Any ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
lowerCAmelCase__ : str ='The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
lowerCAmelCase__ : List[str] ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Any ='The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
lowerCAmelCase__ : Optional[Any] ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
lowerCAmelCase__ : Dict ='The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
lowerCAmelCase__ : str ='\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Optional[Any] ='The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
lowerCAmelCase__ : Optional[int] ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
lowerCAmelCase__ : Dict ='The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
lowerCAmelCase__ : int ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : Dict ='The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
lowerCAmelCase__ : Optional[Any] =''
lowerCAmelCase__ : Dict ='The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
lowerCAmelCase__ : Optional[int] ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
lowerCAmelCase__ : int ='The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'readme_md, expected_dict', [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def a__ ( A__, A__ ):
assert ReadMe.from_string(A__, A__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error', [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def a__ ( A__, A__ ):
with pytest.raises(A__, match=re.escape(expected_error.format(path='root' ) ) ):
SCREAMING_SNAKE_CASE_ : Tuple = ReadMe.from_string(A__, A__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error', [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def a__ ( A__, A__ ):
with pytest.raises(A__, match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(A__, A__ )
@pytest.mark.parametrize(
'readme_md,', [
(README_MULTIPLE_SAME_HEADING_1),
], )
def a__ ( A__ ):
ReadMe.from_string(A__, A__, suppress_parsing_errors=A__ )
@pytest.mark.parametrize(
'readme_md, expected_dict', [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
], )
def a__ ( A__, A__ ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(A__ ) / 'README.md'
with open(A__, 'w+' ) as readme_file:
readme_file.write(A__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ReadMe.from_readme(A__, A__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error', [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
], )
def a__ ( A__, A__ ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : str = Path(A__ ) / 'README.md'
with open(A__, 'w+' ) as readme_file:
readme_file.write(A__ )
SCREAMING_SNAKE_CASE_ : List[str] = expected_error.format(path=A__ )
with pytest.raises(A__, match=re.escape(A__ ) ):
SCREAMING_SNAKE_CASE_ : Tuple = ReadMe.from_readme(A__, A__ )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error', [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
], )
def a__ ( A__, A__ ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : int = Path(A__ ) / 'README.md'
with open(A__, 'w+' ) as readme_file:
readme_file.write(A__ )
SCREAMING_SNAKE_CASE_ : int = expected_error.format(path=A__ )
with pytest.raises(A__, match=re.escape(A__ ) ):
ReadMe.from_readme(A__, A__ )
@pytest.mark.parametrize(
'readme_md,', [
(README_MULTIPLE_SAME_HEADING_1),
], )
def a__ ( A__ ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Dict = Path(A__ ) / 'README.md'
with open(A__, 'w+' ) as readme_file:
readme_file.write(A__ )
ReadMe.from_readme(A__, A__, suppress_parsing_errors=A__ )
| 162
|
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"
def camelcase_to_snakecase(name):
    """Convert a camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert a snake-case string to camel-case."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"
def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
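# Illustrative sanity check of the helpers above (not part of the original module;
# the dataset name and paths are made up, and the expected values assume POSIX path joining):
if __name__ == "__main__":
    assert camelcase_to_snakecase("MySuperDataset") == "my_super_dataset"
    assert snakecase_to_camelcase("my_super_dataset") == "MySuperDataset"
    assert filename_prefix_for_split("my_super_dataset", "train") == "my_super_dataset-train"
    # With shard_lengths, one filename per shard is produced:
    assert filenames_for_dataset_split(
        "/data", "my_super_dataset", "train", filetype_suffix="arrow", shard_lengths=[100, 200]
    ) == [
        "/data/my_super_dataset-train-00000-of-00002.arrow",
        "/data/my_super_dataset-train-00001-of-00002.arrow",
    ]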
| 162
| 1
|
"""simple docstring"""
def average_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of a list of numbers."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
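# Worked example (illustrative): for [0, 0, 1] the mean is 1/3, so the mean absolute
# deviation is (1/3 + 1/3 + 2/3) / 3 = 4/9, i.e.
# average_absolute_deviation([0, 0, 1]) == 0.4444444444444444 up to float rounding.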
if __name__ == "__main__":
import doctest
doctest.testmod()
| 148
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
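# Illustrative usage (assuming this file lives at transformers/models/mobilevit/__init__.py):
# the names registered above resolve lazily on first attribute access, so
#
#     from transformers import MobileViTConfig, MobileViTModel
#     model = MobileViTModel(MobileViTConfig())
#
# only triggers the torch-backed submodule import at that point, keeping the
# top-level `import transformers` cheap.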
| 186
| 0
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
webbrowser.open(link)
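# Illustrative invocation (the script filename is assumed):
#
#     python crawl_google_results.py "python tutorial"
#
# which opens the first organic result for the query in the default browser;
# without arguments, the query is read interactively from stdin.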
| 215
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    """Load an image file and convert it to RGB."""
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        }, )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        }, )
    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """
    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def collate_fn(examples):
    """Stack a list of examples into the batch dict the Trainer expects."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )
    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch
    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initalize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
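# Illustrative invocation (the dataset name and flags follow the usual example
# conventions and are not prescribed by this script itself):
#
#     python run_image_classification.py \
#         --dataset_name beans \
#         --output_dir ./beans_outputs/ \
#         --do_train \
#         --do_eval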
| 215
| 1
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so the module imports without PIL/vision installed."""
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
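# Minimal interactive use of the same pipeline outside the test harness (the model
# name and fixture path are taken from the slow test above; scores shown are the
# approximate values the test asserts):
#
#     from transformers import pipeline
#     vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#     vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#         question="How many cats are there?", top_k=2)
#     # -> [{"score": ~0.88, "answer": "2"}, {"score": ~0.30, "answer": "1"}]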
| 7
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    # implemented only because pytorch-lightning requires it
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
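# Illustrative invocation (script filename and paths are placeholders):
#
#     python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#         --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa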
| 228
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
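# For reference, map_nested applies the function to every leaf of a (possibly
# nested) structure while preserving its shape, e.g.
# map_nested(add_one, {"a": [1, 2]}) == {"a": [2, 3]}, which is exactly what
# the assertions above exercise through the joblib-spark backend.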
| 260
|
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
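# Expected behaviour: the circuit prepares an n-qubit GHZ state, so the only
# measurement outcomes are all-zeros and all-ones. With shots=1000 the printed
# counts look roughly like {'000': 505, '111': 495}; mixed strings such as
# '010' never appear on the noiseless simulator.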
| 260
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)
            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)
            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass
    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
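# Note: passing an explicit `noise` array pins down ViTMAE's otherwise random
# patch masking, which is what lets the slice comparison above use a tight
# tolerance (atol=1e-4) and stay reproducible across the PT and TF models.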
| 84
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 84
| 1
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
    )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
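
# Example invocation (a sketch; the script name and paths below are assumed
# placeholders, not values shipped with this file):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path /path/to/sew_fairseq.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned
#
# Without --is_finetuned (and --dict_path), the script exports a bare SEWModel
# plus the feature-extractor config instead of a SEWForCTC checkpoint.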
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # image noising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
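
    # Re-seeding with torch.manual_seed(0) before each sub-module above keeps
    # the randomly initialized dummy weights deterministic, so test outputs are
    # reproducible across runs and machines.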
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
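
# Worked example of the expected_local_size arithmetic above (a standalone
# sanity check, not part of the test): with full_size = 4 * 3 = 12 and
# world_size = 5, each rank gets 12 // 5 = 2 items and the 12 % 5 = 2 leftovers
# go to ranks 0 and 1, i.e. local sizes 3, 3, 2, 2, 2 (summing back to 12).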
'''simple docstring'''
def is_palindrome(n) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
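
# Worked example of the reverse-and-add loop above: 47 + 74 = 121, a palindrome
# after a single step, so 47 is not counted; 196 never reaches a palindrome
# within the 50-iteration cap and is counted as a Lychrel candidate. For
# reference, Project Euler problem 55 expects solution() == 249 below 10_000.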
if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
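
# Example of the recursion above (from the reference doctest for this
# algorithm): longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
# returns [10, 22, 33, 41, 60, 80], one longest non-decreasing subsequence.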
if __name__ == "__main__":
    import doctest

    doctest.testmod()
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
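
# Worked example of the recurrence above:
# C(4) = C0*C3 + C1*C2 + C2*C1 + C3*C0 = 5 + 2 + 2 + 5 = 14,
# so catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].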
if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the impedance relation Z = sqrt(R**2 + X**2) to compute whichever of
    resistance, reactance, or impedance is passed in as zero, from the other two.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
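
# Worked example of the right-triangle relation Z**2 = R**2 + X**2 used above:
# electrical_impedance(3, 4, 0) returns {"impedance": 5.0}, and
# electrical_impedance(0, 4, 5) recovers {"resistance": 3.0}.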
if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by performing a
        # forward pass with `x` and pairing up the traced, parametrized modules.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )
class NameToFromModelFuncMap(dict):
    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm for names that are not registered explicitly
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val
class NameToOurModelFuncMap(dict):
    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)

    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )

    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output

    # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]

    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
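
# Example invocation (a sketch; the script name is an assumed placeholder):
#
#   python convert_regnet_to_pytorch.py \
#       --model_name regnet-y-040 \
#       --pytorch_dump_folder_path ./regnet-dump
#
# Omitting --model_name converts every architecture listed in names_to_config.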
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
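
# Usage sketch (the model outputs below are hypothetical; only the tokenizer
# calls mirror the API defined above):
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions="Who wrote Hamlet?",
#       titles=["Hamlet"],
#       texts=["Hamlet is a tragedy written by William Shakespeare."],
#       return_tensors="pt",
#   )
#   # outputs = model(**encoded)  # a DPRReaderOutput from a DPRReader model
#   # best_spans = tokenizer.decode_best_spans(encoded, outputs)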
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
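
# Usage sketch of the lazy structure above (assuming this file is
# transformers/models/xlm_roberta/__init__.py): importing the config is cheap
# because only the configuration module is loaded, while the first access to a
# model class triggers the torch-backed import through _LazyModule.
#
#   from transformers.models.xlm_roberta import XLMRobertaConfig  # no torch yet
#   from transformers.models.xlm_roberta import XLMRobertaModel   # loads modeling_xlm_roberta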
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if left < right:
_UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 )
_UpperCAmelCase , _UpperCAmelCase = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
_UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
quick_sort_random(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point
quick_sort_random(
_SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip()
_UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )]
quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) )
print(_SCREAMING_SNAKE_CASE )
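# Quick sanity check (added example, not in the original script): the sort is
# in place over the half-open range [left, right).
def _demo_quick_sort_random() -> None:
    sample = [9, 3, 7, 4, 1]
    quick_sort_random(sample, 0, len(sample))
    assert sample == [1, 3, 4, 7, 9]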
if __name__ == "__main__":
main()
| 260
| 1
|
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
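# Hedged usage sketch (my addition): once this module is built, the generated
# ModelProto message can parse a raw sentencepiece model file.
# "/path/to/spiece.model" is a placeholder path.
# m = ModelProto()
# with open("/path/to/spiece.model", "rb") as f:
#     m.ParseFromString(f.read())
# print(m.trainer_spec.model_type, len(m.pieces))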
| 359
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon India search results for ``product`` into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out fields that could not be scraped, then switch to a 1-based index
    data_frame.loc[data_frame["MRP of the product"] == "", "MRP of the product"] = " "
    data_frame.loc[data_frame["Product Rating"] == "Not available", "Product Rating"] = " "
    data_frame.index += 1
    return data_frame
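# Usage sketch (my addition): any search term works, mirroring the __main__
# block below.
# df = get_amazon_product_data("monitor")
# print(df.head())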
if __name__ == "__main__":
lowercase : Union[str, Any] = 'headphones'
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 311
| 0
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:

    class Image:
        """Stand-in so the type references below still resolve without PIL."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    """Return a stable md5 fingerprint of the image contents."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
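# Illustrative check (my addition): hashimage fingerprints an image so tests
# can compare pipeline outputs without storing raw bytes.
# from PIL import Image as PILImage
# img = PILImage.new("RGB", (4, 4), color="red")
# assert hashimage(img) == hashimage(img.copy())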
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _A ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def __snake_case ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple):
a : List[Any] = DepthEstimationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase)
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str):
a : List[str] = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)} , __UpperCAmelCase)
import datasets
a : Any = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test")
a : Tuple = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
])
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
{"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
] , __UpperCAmelCase , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF")
def __snake_case ( self : str):
pass
@slow
@require_torch
def __snake_case ( self : Union[str, Any]):
a : Any = "Intel/dpt-large"
a : Union[str, Any] = pipeline("depth-estimation" , model=__UpperCAmelCase)
a : Tuple = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
a : List[str] = hashimage(outputs["depth"])
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()) , 29.304)
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()) , 2.662)
@require_torch
def __snake_case ( self : Optional[int]):
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
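# Usage sketch (my addition), mirroring the slow test above:
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"]            # PIL.Image depth map
# out["predicted_depth"]  # raw torch.Tensor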
| 40
|
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate sqrt(a) with Newton's method on f(x) = x**2 - a."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
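# Added sanity check (my addition): the Newton iteration should agree with
# math.sqrt for well-behaved inputs.
def _demo_square_root() -> None:
    assert abs(square_root_iterative(25.0) - 5.0) < 1e-9
    assert abs(square_root_iterative(2.0) - math.sqrt(2.0)) < 1e-9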
if __name__ == "__main__":
from doctest import testmod
testmod()
| 55
| 0
|
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
| 367
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class a ( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ['pixel_values']
def __init__( self : Any , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__snake_case : Optional[Any] , ):
super().__init__(**__snake_case )
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 2_24}
UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
UpperCAmelCase_ = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCamelCase_ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ):
UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase_ = int((2_56 / 2_24) * size['''shortest_edge'''] )
UpperCAmelCase_ = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
__snake_case , size=(size_dict['''height'''], size_dict['''width''']) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : Tuple , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ):
UpperCAmelCase_ = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ):
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : int , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ):
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : int , __snake_case : ImageInput , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : PILImageResampling = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[float] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[TensorType] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[str] , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(__snake_case , __snake_case , __snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(__snake_case , __snake_case ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(__snake_case , __snake_case ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(__snake_case , __snake_case , __snake_case ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
UpperCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
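# Design note (my addition): when only `shortest_edge` is given, the resize
# target is scaled by 256/224 before the center crop back to `crop_size`,
# mirroring the classic ImageNet eval recipe (resize to 256, crop to 224) for
# any requested edge length.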
| 177
| 0
|
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
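# Worked example (my addition, assumes `jiwer` is installed): the measures dict
# exposes the S/D/I counts behind WER = (S + D + I) / (S + D + C).
def _demo_wer() -> float:
    measures = compute_measures("this is the reference", "this is the prediction")
    s, d, i = measures["substitutions"], measures["deletions"], measures["insertions"]
    return (s + d + i) / (s + d + measures["hits"])  # 1 substitution / 4 words = 0.25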
| 34
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git_vision_model'''
def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git'''
def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
A__ = GitVisionConfig(**UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
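# Usage sketch (my addition, written with the upstream names GitVisionConfig /
# GitConfig that this file mirrors):
# from transformers import GitConfig, GitVisionConfig
# vision = GitVisionConfig(image_size=224, patch_size=16)
# config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
# config.to_dict()["vision_config"]["image_size"]  # -> 224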
| 14
| 0
|
import unittest
from transformers import DonutProcessor
lowercase_ = "naver-clova-ix/donut-base"
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ):
__snake_case : Tuple = DonutProcessor.from_pretrained(UpperCamelCase_ )
def snake_case__ ( self : Optional[Any] ):
__snake_case : List[str] = {
"""name""": """John Doe""",
"""age""": """99""",
"""city""": """Atlanta""",
"""state""": """GA""",
"""zip""": """30301""",
"""phone""": """123-4567""",
"""nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
}
__snake_case : List[str] = (
"""<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
"""<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
"""<s_nicknames><s_nickname>Johnny</s_nickname>"""
"""<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
)
__snake_case : Union[str, Any] = self.processor.tokenajson(UpperCamelCase_ )
self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
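# Behavior sketch (my addition): token2json derives nesting from the tag
# names, and <sep/> splits repeated groups into a list, e.g.
# self.processor.token2json("<s_total>54.00</s_total>")  # -> {"total": "54.00"}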
| 354
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
def __init__( self : Dict , *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ):
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def snake_case__ ( self : List[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = {}
__snake_case : int = {}
if prompt is not None:
__snake_case : Dict = prompt
if generate_kwargs is not None:
__snake_case : List[Any] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case : Optional[int] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
__snake_case : Any = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _lowerCAmelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCAmelCase : Union[str, Any] ):
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def snake_case__ ( self : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str]=None ):
__snake_case : Optional[Any] = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
__snake_case : Tuple = self.model.config.model_type
if model_type == "git":
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Any = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
__snake_case : Tuple = [self.tokenizer.cls_token_id] + input_ids
__snake_case : int = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
__snake_case : Dict = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case : int = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
__snake_case : Optional[Any] = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case : Tuple = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case : int = None
return model_inputs
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
__snake_case : List[Any] = None
if generate_kwargs is None:
__snake_case : Dict = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case : Dict = model_inputs.pop(self.model.main_input_name )
__snake_case : Optional[int] = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def snake_case__ ( self : List[Any] , _lowerCAmelCase : str ):
__snake_case : Union[str, Any] = []
for output_ids in model_outputs:
__snake_case : Union[str, Any] = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
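# Usage sketch (my addition; model name is an assumed example checkpoint):
# from transformers import pipeline
# captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
# captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
# # -> [{"generated_text": "..."}]; `prompt=` is honored for git/pix2struct.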
| 20
| 0
|
def reverse_long_words(sentence: str) -> str:
    """
    Reverse each word that is longer than 4 characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('Hey wollef sroirraw'))
| 29
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
    def _compute( self ,predictions ,references ,p_features=None ,q_features=None ,p_tokens=None ,q_tokens=None ,num_buckets="auto" ,pca_max_data=-1 ,kmeans_explained_var=0.9 ,kmeans_num_redo=5 ,kmeans_max_iter=500 ,featurize_model_name="gpt2-large" ,device_id=-1 ,max_text_length=1024 ,divergence_curve_discretization_size=25 ,mauve_scaling_factor=5 ,verbose=True ,seed=25 ,):
        out = compute_mauve(
            p_text=predictions ,q_text=references ,p_features=p_features ,q_features=q_features ,p_tokens=p_tokens ,q_tokens=q_tokens ,num_buckets=num_buckets ,pca_max_data=pca_max_data ,kmeans_explained_var=kmeans_explained_var ,kmeans_num_redo=kmeans_num_redo ,kmeans_max_iter=kmeans_max_iter ,featurize_model_name=featurize_model_name ,device_id=device_id ,max_text_length=max_text_length ,divergence_curve_discretization_size=divergence_curve_discretization_size ,mauve_scaling_factor=mauve_scaling_factor ,verbose=verbose ,seed=seed ,)
        return out
| 334
| 0
|
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__lowerCamelCase : List[str] = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 359
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase : Any = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Optional[int] = {
"""allenai/led-base-16384""": 1_6384,
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = LEDTokenizer
a_ = ["input_ids", "attention_mask"]
def __init__( self : int , __A : Optional[int]=None , __A : Any=None , __A : Any=None , __A : int="replace" , __A : Optional[Any]="<s>" , __A : List[Any]="</s>" , __A : Dict="</s>" , __A : Union[str, Any]="<s>" , __A : List[Any]="<unk>" , __A : Optional[Any]="<pad>" , __A : Optional[int]="<mask>" , __A : str=False , __A : Optional[Any]=True , **__A : List[Any] , ):
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
snake_case__ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __A ) != add_prefix_space:
snake_case__ : List[str] = getattr(__A , pre_tok_state.pop("type" ) )
snake_case__ : Tuple = add_prefix_space
snake_case__ : Any = pre_tok_class(**__A )
snake_case__ : List[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case__ : str = "post_processor"
snake_case__ : Union[str, Any] = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
snake_case__ : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ : int = tuple(state["sep"] )
if "cls" in state:
snake_case__ : Optional[int] = tuple(state["cls"] )
snake_case__ : Union[str, Any] = False
if state.get("add_prefix_space" , __A ) != add_prefix_space:
snake_case__ : Optional[Any] = add_prefix_space
snake_case__ : List[str] = True
if state.get("trim_offsets" , __A ) != trim_offsets:
snake_case__ : str = trim_offsets
snake_case__ : Tuple = True
if changes_to_apply:
snake_case__ : Any = getattr(__A , state.pop("type" ) )
snake_case__ : Optional[Any] = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _lowercase ( self : Union[str, Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self : str , __A : int ):
snake_case__ : str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
snake_case__ : Optional[int] = value
def _lowercase ( self : List[str] , *__A : Union[str, Any] , **__A : int ):
snake_case__ : Dict = kwargs.get("is_split_into_words" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__A , **__A )
def _lowercase ( self : Union[str, Any] , *__A : Tuple , **__A : Union[str, Any] ):
snake_case__ : List[str] = kwargs.get("is_split_into_words" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*__A , **__A )
def _lowercase ( self : Union[str, Any] , __A : str , __A : Optional[str] = None ):
snake_case__ : str = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def _lowercase ( self : Union[str, Any] , __A : int , __A : str=None ):
snake_case__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Optional[Any] = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : str , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Dict = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Optional[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : List[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case__ : Union[str, Any] = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : Dict = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : Optional[Any] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Optional[Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
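# Usage sketch (my addition; the upstream class name LEDTokenizerFast is
# assumed from the vocab map above):
# tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
# enc = tok("a long document ...")
# enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
# padded = tok.pad(enc, padding="max_length", max_length=32)
# padded["global_attention_mask"][-1]  # -> -1: padding means "local attention"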
| 286
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
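# Note (my addition): with the lazy-module pattern above, importing the package
# is cheap; the torch-backed classes listed in the import structure are only
# materialized on first attribute access, e.g.:
# from transformers.models.megatron_bert import MegatronBertModel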
| 205
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    '''simple docstring'''
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict, hidden_size):
    '''simple docstring'''
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint):
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    '''simple docstring'''
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size)
    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
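# CLI sketch (my addition; the script filename is a placeholder, flags match
# the argparse definitions below):
# python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu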
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
a : int = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
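
# Illustrative invocation (the script filename and output folder below are
# assumptions, not taken from this file):
#   python convert_musicgen_transformers.py --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small --device cpu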
"""simple docstring"""
def power ( base : int , exponent : int ) -> float:
    '''simple docstring'''
    return base * power(base , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print("Raise base to the power of exponent using recursion...")
_lowerCAmelCase : str = int(input("Enter the base: ").strip())
_lowerCAmelCase : List[str] = int(input("Enter the exponent: ").strip())
_lowerCAmelCase : Dict = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
_lowerCAmelCase : int = 1 / result
print(F"{base} to the power of {exponent} is {result}")
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def __snake_case ( *SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Union[Dict, Any]] = None , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Tuple=2 ) -> Optional[Any]:
'''simple docstring'''
from .. import __version__
_UpperCAmelCase : Tuple = take_from
_UpperCAmelCase : Optional[int] = ()
if not isinstance(args[0] , SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : int = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
f' version {__version__} is >= {version_name}' )
_UpperCAmelCase : Any = None
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE__ ),)
_UpperCAmelCase : Tuple = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
elif hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
values += (getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),)
_UpperCAmelCase : str = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
elif deprecated_kwargs is None:
_UpperCAmelCase : Tuple = f'`{attribute}` is deprecated and will be removed in version {version_name}.'
if warning is not None:
_UpperCAmelCase : Optional[int] = warning + " " if standard_warn else ""
warnings.warn(warning + message , SCREAMING_SNAKE_CASE__ , stacklevel=SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) > 0:
_UpperCAmelCase : List[Any] = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCAmelCase : Optional[int] = call_frame.filename
_UpperCAmelCase : Dict = call_frame.lineno
_UpperCAmelCase : List[Any] = call_frame.function
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return
elif len(SCREAMING_SNAKE_CASE__ ) == 1:
return values[0]
return values
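
# Hedged usage sketch: the helper above mirrors `diffusers.utils.deprecate`.
# A caller usually hands its **kwargs in via `take_from`, so the deprecated key
# is popped, warned about, and returned in one step. `width` below is a
# hypothetical deprecated argument used purely for illustration.
from diffusers.utils import deprecate

def resize(image, size=None, **kwargs):
    if "width" in kwargs:
        # emits a FutureWarning and removes "width" from kwargs
        width = deprecate("width", "1.0.0", "Use `size` instead.", take_from=kwargs)
        size = (width, width)
    return image, size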
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
a_ : Any = logging.get_logger(__name__)
def list_field (default=None , metadata=None ) -> List[str]:
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class a :
_lowerCAmelCase = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
_lowerCAmelCase = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
_lowerCAmelCase = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Use FP16 to accelerate inference."""} )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Benchmark training of model"""} )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Verbose memory tracing"""} )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Trace memory line by line"""} )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Save result to a CSV file"""} )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Save all print statements in a log file"""} )
_lowerCAmelCase = field(default=_UpperCAmelCase , metadata={"""help""": """Whether to print environment information"""} )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
_lowerCAmelCase = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
_lowerCAmelCase = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
_lowerCAmelCase = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
_lowerCAmelCase = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
_lowerCAmelCase = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
_lowerCAmelCase = field(
default=f'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
_lowerCAmelCase = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
_lowerCAmelCase = field(
default=_UpperCAmelCase , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def __UpperCAmelCase ( self ) -> Optional[int]:
warnings.warn(
f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , _UpperCAmelCase , )
def __UpperCAmelCase ( self ) -> List[str]:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __UpperCAmelCase ( self ) -> Union[str, Any]:
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def __UpperCAmelCase ( self ) -> Tuple:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
"""simple docstring"""
from jiwer import compute_measures
import datasets
__A = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__A = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
__A = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase (datasets.Metric ):
"""simple docstring"""
def _snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def _snake_case ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(_UpperCAmelCase , _UpperCAmelCase )["wer"]
else:
lowercase__: Dict = 0
lowercase__: Union[str, Any] = 0
for prediction, reference in zip(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = compute_measures(_UpperCAmelCase , _UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
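
# Worked check of the WER formula above, using the first sentence pair from the
# docstring (a minimal sketch; assumes jiwer is installed): "this is the
# prediction" vs. "this is the reference" has 1 substitution, 0 deletions and
# 0 insertions over N = 4 reference words.
if __name__ == "__main__":
    _example = compute_measures("this is the reference", "this is the prediction")
    assert _example["wer"] == 0.25  # (S + D + I) / N = (1 + 0 + 0) / 4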
"""simple docstring"""
import operator as op
UpperCAmelCase__ = "scaler.pt"
UpperCAmelCase__ = "pytorch_model"
UpperCAmelCase__ = "random_states"
UpperCAmelCase__ = "optimizer"
UpperCAmelCase__ = "scheduler"
UpperCAmelCase__ = "pytorch_model.bin"
UpperCAmelCase__ = "pytorch_model.bin.index.json"
UpperCAmelCase__ = "model.safetensors"
UpperCAmelCase__ = "model.safetensors.index.json"
UpperCAmelCase__ = "1.10.2"
UpperCAmelCase__ = "py38"
UpperCAmelCase__ = "4.17.0"
UpperCAmelCase__ = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
UpperCAmelCase__ = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
UpperCAmelCase__ = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
UpperCAmelCase__ = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
UpperCAmelCase__ = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
UpperCAmelCase__ = "2.0.1"
UpperCAmelCase__ = ["pdsh", "standard", "openmpi", "mvapich"]
UpperCAmelCase__ = ["default", "reduce-overhead", "max-autotune"]
UpperCAmelCase__ = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCAmelCase__ = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
UpperCAmelCase__ = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
UpperCAmelCase__ = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImgaImgPipeline
    params = ['''image''']
    batch_params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gpu_offload = False
@property
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
return 32
@property
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
return 32
@property
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
return 8
@property
def _lowerCamelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_UpperCAmelCase = CLIPVisionModel(A)
return model
@property
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_24 , )
return image_processor
@property
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_UpperCAmelCase = PriorTransformer(**A)
return model
@property
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
torch.manual_seed(0)
_UpperCAmelCase = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_UpperCAmelCase = ShapERenderer(**A)
return model
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.dummy_prior
_UpperCAmelCase = self.dummy_image_encoder
_UpperCAmelCase = self.dummy_image_processor
_UpperCAmelCase = self.dummy_renderer
_UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=A , clip_sample=A , clip_sample_range=1.0 , )
_UpperCAmelCase = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : List[str] , A : Optional[Any] , A : Tuple=0) -> Dict:
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A)).to(A)
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**A)
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = pipe(**self.get_dummy_inputs(A))
_UpperCAmelCase = output.images[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCAmelCase = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def _lowerCamelCase ( self : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = torch_device == 'cpu'
_UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=A , relax_max_difference=A , )
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**A)
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = 1
_UpperCAmelCase = 2
_UpperCAmelCase = self.get_dummy_inputs(A)
for key in inputs.keys():
if key in self.batch_params:
_UpperCAmelCase = batch_size * [inputs[key]]
_UpperCAmelCase = pipe(**A , num_images_per_prompt=A)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Dict) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png')
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy')
_UpperCAmelCase = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
_UpperCAmelCase = pipe.to(A)
pipe.set_progress_bar_config(disable=A)
_UpperCAmelCase = torch.Generator(device=A).manual_seed(0)
_UpperCAmelCase = pipe(
A , generator=A , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(A , A)
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
def __init__( self , _A , _A=9_9 , _A=1_3 , _A=1_6 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=3_2 , _A=4 , _A=4 , _A=3_0 , _A=0 , _A=1 , _A=2 , _A=None , ):
"""simple docstring"""
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = decoder_seq_length
# For common tests
__lowerCAmelCase = self.decoder_seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_attention_mask
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = d_model
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = eos_token_id
__lowerCAmelCase = bos_token_id
__lowerCAmelCase = pad_token_id
__lowerCAmelCase = decoder_start_token_id
__lowerCAmelCase = use_cache
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = None
__lowerCAmelCase = decoder_seq_length
__lowerCAmelCase = 2
__lowerCAmelCase = 1
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_attention_mask:
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__lowerCAmelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A , ):
"""simple docstring"""
__lowerCAmelCase = True
__lowerCAmelCase = TrOCRDecoder(config=_A ).to(_A ).eval()
__lowerCAmelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__lowerCAmelCase = model(_A , use_cache=_A )
__lowerCAmelCase = model(_A )
__lowerCAmelCase = model(_A , use_cache=_A )
self.parent.assertTrue(len(_A ) == len(_A ) )
self.parent.assertTrue(len(_A ) == len(_A ) + 1 )
__lowerCAmelCase = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
__lowerCAmelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCAmelCase = model(_A )["last_hidden_state"]
__lowerCAmelCase = model(_A , past_key_values=_A )["last_hidden_state"]
# select random slice
__lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCAmelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_A , _A , atol=1E-3 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_torch
class a__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_a : List[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
_a : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
_a : Union[str, Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
_a : List[Any] = True
_a : Dict = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TrOCRStandaloneDecoderModelTester(self , is_training=_A )
__lowerCAmelCase = ConfigTester(self , config_class=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _snake_case( *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=2 ) -> Optional[Any]:
from .. import __version__
lowercase : int = take_from
lowercase : Tuple = ()
if not isinstance(args[0] , SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}" )
lowercase : int = None
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE__ ),)
lowercase : Union[str, Any] = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
values += (getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),)
lowercase : int = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
lowercase : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
lowercase : Dict = warning + """ """ if standard_warn else """"""
warnings.warn(warning + message , SCREAMING_SNAKE_CASE__ , stacklevel=SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) > 0:
lowercase : str = inspect.getouterframes(inspect.currentframe() )[1]
lowercase : List[str] = call_frame.filename
lowercase : Tuple = call_frame.lineno
lowercase : List[str] = call_frame.function
lowercase , lowercase : Optional[Any] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return
elif len(SCREAMING_SNAKE_CASE__ ) == 1:
return values[0]
return values
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester :
'''simple docstring'''
def __init__( self : List[Any] , _A : int , _A : str=13 , _A : Optional[Any]=7 , _A : Union[str, Any]=False , _A : Optional[int]=True , _A : Optional[int]=False , _A : List[str]=False , _A : List[str]=19 , _A : Union[str, Any]=32 , _A : List[Any]=5 , _A : Dict=4 , _A : Any=37 , _A : Union[str, Any]="gelu" , _A : Dict=0.1 , _A : List[Any]=0.1 , _A : List[str]=512 , _A : Tuple=16 , _A : Optional[int]=2 , _A : Dict=0.02 , _A : str=3 , _A : List[Any]=4 , _A : str=None , ) -> List[Any]:
__magic_name__ : Tuple = parent
__magic_name__ : Dict = batch_size
__magic_name__ : int = seq_length
__magic_name__ : Union[str, Any] = is_training
__magic_name__ : Dict = use_input_mask
__magic_name__ : List[str] = use_token_type_ids
__magic_name__ : Optional[int] = use_labels
__magic_name__ : Union[str, Any] = vocab_size
__magic_name__ : Dict = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : str = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = max_position_embeddings
__magic_name__ : Dict = type_vocab_size
__magic_name__ : int = type_sequence_label_size
__magic_name__ : Any = initializer_range
__magic_name__ : Union[str, Any] = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
__magic_name__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Tuple = None
if self.use_input_mask:
__magic_name__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Optional[int] = None
__magic_name__ : int = None
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : str = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : List[Any] = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=_A , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : Tuple , _A : Tuple , _A : Optional[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Any = EsmForProteinFolding(config=_A ).float()
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A )
__magic_name__ : Any = model(_A )
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
__magic_name__ : List[Any] = self.prepare_config_and_inputs()
        ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) : Dict = config_and_inputs
__magic_name__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ : str = False
A_ : Union[str, Any] = (EsmForProteinFolding,) if is_torch_available() else ()
A_ : List[Any] = ()
A_ : Any = {} if is_torch_available() else {}
A_ : Optional[int] = False
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Optional[int] = EsmFoldModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
@unittest.skip('Does not support attention outputs' )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
pass
@unittest.skip
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __lowerCAmelCase ( self : int ) -> str:
pass
@unittest.skip('Esm does not support embedding resizing' )
def __lowerCAmelCase ( self : Any ) -> Any:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def __lowerCAmelCase ( self : int ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __lowerCAmelCase ( self : str ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __lowerCAmelCase ( self : str ) -> List[str]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def __lowerCAmelCase ( self : Any ) -> Tuple:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def __lowerCAmelCase ( self : Dict ) -> int:
pass
@unittest.skip('ESMFold only has one output format.' )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def __lowerCAmelCase ( self : int ) -> Tuple:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
pass
@require_torch
class _lowerCamelCase ( TestCasePlus ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Any ) -> Any:
__magic_name__ : Any = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
__magic_name__ : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__magic_name__ : List[Any] = model(_A )['positions']
__magic_name__ : Tuple = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , _A , atol=1E-4 ) )
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCAmelCase :Optional[Any] = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
if isinstance(lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase , PIL.Image.Image ):
__magic_name__ : List[Any] = [image]
__magic_name__ : List[Any] = [trans(img.convert('RGB' ) ) for img in image]
__magic_name__ : Dict = torch.stack(lowerCAmelCase )
return image
class _lowerCamelCase ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self : Optional[Any] , _A : str , _A : int ) -> Dict:
super().__init__()
# make sure scheduler can always be converted to DDIM
__magic_name__ : Optional[int] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_A , scheduler=_A )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any] ) -> Optional[int]:
if strength < 0 or strength > 1:
raise ValueError(F'The value of strength should in [0.0, 1.0] but is {strength}' )
def __lowerCAmelCase ( self : Any , _A : List[str] , _A : Optional[Any] , _A : int ) -> List[Any]:
# get the original timestep using init_timestep
__magic_name__ : Tuple = min(int(num_inference_steps * strength ) , _A )
__magic_name__ : Any = max(num_inference_steps - init_timestep , 0 )
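        # Worked example (illustrative values): strength = 0.8 with num_inference_steps = 50
        # gives init_timestep = 40 and t_start = 10, so denoising covers the last 40 steps.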
__magic_name__ : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __lowerCAmelCase ( self : Any , _A : str , _A : Optional[int] , _A : Tuple , _A : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}' )
__magic_name__ : Union[str, Any] = image.to(device=_A , dtype=_A )
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(_A )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
__magic_name__ : Tuple = init_latents.shape
__magic_name__ : Any = randn_tensor(_A , generator=_A , device=_A , dtype=_A )
# get latents
print('add noise to latents at timestep' , _A )
__magic_name__ : List[str] = self.scheduler.add_noise(_A , _A , _A )
__magic_name__ : List[str] = init_latents
return latents
@torch.no_grad()
def __call__( self : Tuple , _A : Union[torch.FloatTensor, PIL.Image.Image] = None , _A : float = 0.8 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : float = 0.0 , _A : int = 50 , _A : Optional[bool] = None , _A : Optional[str] = "pil" , _A : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(_A )
# 2. Preprocess image
__magic_name__ : int = preprocess(_A )
# 3. set timesteps
self.scheduler.set_timesteps(_A , device=self.device )
__magic_name__ , __magic_name__ : Dict = self.get_timesteps(_A , _A , self.device )
__magic_name__ : Dict = timesteps[:1].repeat(_A )
# 4. Prepare latent variables
__magic_name__ : Optional[Any] = self.prepare_latents(_A , _A , _A , self.unet.dtype , self.device , _A )
__magic_name__ : Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(_A ):
# 1. predict noise model_output
__magic_name__ : Dict = self.unet(_A , _A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__magic_name__ : List[Any] = self.scheduler.step(
_A , _A , _A , eta=_A , use_clipped_model_output=_A , generator=_A , ).prev_sample
__magic_name__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : Dict = self.numpy_to_pil(_A )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_A )
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=DummyObject ):
__lowerCamelCase : Tuple = ["""flax""", """transformers"""]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> str:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
requires_backends(cls , ["flax", "transformers"] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__lowerCamelCase : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Tuple:
requires_backends(cls , ["flax", "transformers"] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__lowerCamelCase : List[str] = ["""flax""", """transformers"""]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["flax", "transformers"] )
class lowerCAmelCase_ ( metaclass=DummyObject ):
__lowerCamelCase : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> int:
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _snake_case ( cls , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
requires_backends(cls , ["flax", "transformers"] )
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
"""simple docstring"""
A_ , A_ : List[str] = grid.shape
A_ : Optional[int] = [-1, 1, 0, 0]
A_ : str = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
A_ , A_ : List[Any] = [(0, source)], set()
A_ : Optional[Any] = np.full((rows, cols) , np.inf )
A_ : int = 0
A_ : Optional[int] = np.empty((rows, cols) , dtype=_UpperCAmelCase )
A_ : Optional[int] = None
while queue:
((A_) , (A_)) : str = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
A_ : int = []
while (x, y) != source:
path.append((x, y) )
A_ , A_ : List[Any] = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
A_ , A_ : Tuple = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
A_ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase , (dist + 1, (nx, ny)) )
A_ : Optional[Any] = dist + 1
A_ : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
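    # Hedged usage sketch: with readable names the function above is
    # dijkstra(grid, source, destination, allow_diagonal) -> (distance, path).
    # On this 3x3 grid (1 = walkable cell), the shortest route takes 4 unit steps:
    #   grid = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 1]])
    #   dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
    #   # dist == 4.0, path == [(0, 0), (0, 1), (1, 1), (2, 1), (2, 2)]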
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
a__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a__ : Optional[str] = field(
default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} )
a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} )
@dataclass
class lowerCAmelCase__ :
a__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
a__ : Optional[str] = field(
default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , )
a__ : Optional[int] = field(
default=1_024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(
default=128 , metadata={
"""help""": (
"""The maximum total sequence length for target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for validation target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded. """
"""This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """
"""during ``evaluate`` and ``predict``."""
)
} , )
a__ : Optional[int] = field(
default=142 , metadata={
"""help""": (
"""The maximum total sequence length for test target text after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} )
a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} )
a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} )
a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} )
a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} )
a__ : bool = field(
default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , )
def handle_metrics ( split , metrics , output_dir ):
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f'''  {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , f'''{split}_results.json''' ) )
def main ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(__lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowerCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
__lowerCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__lowerCAmelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__lowerCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
__lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__lowerCAmelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__lowerCamelCase = SeqaSeqDataset
# Get datasets
__lowerCamelCase = (
dataset_class(
__lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__lowerCamelCase = (
dataset_class(
__lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__lowerCamelCase = (
dataset_class(
__lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__lowerCamelCase = (
build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None
)
__lowerCamelCase = SeqaSeqTrainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator(
__lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , )
__lowerCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__lowerCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__lowerCamelCase = train_result.metrics
__lowerCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
__lowerCamelCase = data_args.n_val
__lowerCamelCase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' )
__lowerCamelCase = test_output.metrics
__lowerCamelCase = data_args.n_test
if trainer.is_world_process_zero():
__lowerCamelCase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir )
all_metrics.update(__lowerCAmelCase )
if training_args.predict_with_generate:
__lowerCamelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
__lowerCamelCase = lmap(str.strip , __lowerCAmelCase )
write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _mp_fn ( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
def greatest_common_divisor ( a : int , b : int ) -> int:
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative ( x : int , y : int ) -> int:
    while y: # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
def main ( ) -> None:
    try:
        nums = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_b}) = '''
            f'''{greatest_common_divisor(num_a , num_b )}''' )
        print(f'''By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )
if __name__ == "__main__":
main()
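For reference, a clean, runnable sketch of the two Euclid variants above with de-obfuscated names; this restates the same logic, it is not the original source:
def greatest_common_divisor(a: int, b: int) -> int:
    # Recursive Euclid: gcd(a, b) == gcd(b % a, a); base case gcd(0, b) == |b|.
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    # Iterative Euclid: when y reaches 0, x holds the GCD.
    while y:
        x, y = y, x % y
    return abs(x)


assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8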
| 339
| 1
|
def a_ ( _A , _A , _A ) -> float:
"""simple docstring"""
snake_case__ = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # closed-form sum of an arithmetic series: n/2 * (2a + (n - 1) * d)
return total
def a_ ( ) -> int:
"""simple docstring"""
print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
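The closed form used above is the arithmetic-series sum S_n = n/2 * (2a + (n - 1)d) for first term a, common difference d, and n terms. A quick hedged cross-check against a brute-force sum:
def sum_of_series(first_term: float, common_diff: float, num_of_terms: int) -> float:
    # n/2 * (2a + (n - 1) * d)
    return (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)


# 1 + 2 + ... + 10 == 55
assert sum_of_series(1, 1, 10) == sum(range(1, 11)) == 55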
| 307
|
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __magic_name__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Any ) -> Any:
# Initialise PyTorch model
lowercase : Union[str, Any] = RemBertConfig.from_json_file(__snake_case )
print("Building PyTorch model from configuration: {}".format(str(__snake_case ) ) )
lowercase : str = RemBertModel(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print("Save PyTorch model to {}".format(__snake_case ) )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
_A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_A : List[str] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
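A hedged usage sketch: the converter above can also be invoked programmatically. The paths below are placeholders, and the positional argument order mirrors the CLI flags:
convert_rembert_tf_checkpoint_to_pytorch(
    "path/to/model.ckpt",           # --tf_checkpoint_path
    "path/to/rembert_config.json",  # --rembert_config_file
    "path/to/pytorch_model.bin",    # --pytorch_dump_path
)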
| 202
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _a ( ):
"""simple docstring"""
lowercase__ = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=SCREAMING_SNAKE_CASE__ )
lowercase__ = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
TestCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
# Parse args
lowercase__ = parser.parse_known_args()
if not hasattr(SCREAMING_SNAKE_CASE__ , '''func''' ):
parser.print_help()
exit(1 )
lowercase__ = parse_unknown_args(SCREAMING_SNAKE_CASE__ )
# Run
lowercase__ = args.func(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
service.run()
if __name__ == "__main__":
main()
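The CLI above uses the standard argparse subcommand pattern: each command class registers its own subparser and stores a factory in `func`, which `main` then calls and runs. A minimal hedged sketch of that pattern in isolation (all names hypothetical):
import argparse

class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        p = subparsers.add_parser("echo", help="print a message")
        p.add_argument("--message", default="hello")
        p.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)

parser = argparse.ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="command helpers")
EchoCommand.register_subcommand(subparsers)
args = parser.parse_args(["echo", "--message", "hi"])
service = args.func(args)   # build the command object
service.run()               # prints "hi"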
| 367
|
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = len(SCREAMING_SNAKE_CASE )
lowercase__ = []
for i in range(len(SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowercase__ = True
for j in range(SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowercase__ = False
break
if match_found:
position.append(SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
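The scan above is the O(n * m) brute-force search: try the pattern at every offset and compare character by character. A hedged, de-obfuscated sketch with the same test cases:
def naive_pattern_search(s: str, pattern: str) -> list[int]:
    # A match at offset i means s[i : i + len(pattern)] == pattern.
    pat_len = len(pattern)
    positions = []
    for i in range(len(s) - pat_len + 1):
        if s[i : i + pat_len] == pattern:
            positions.append(i)
    return positions


assert naive_pattern_search("ABCDEFG", "DE") == [3]
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]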
| 93
| 0
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCamelCase__: Any = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def snake_case_ ( _lowerCAmelCase : Optional[Any] ) -> Tuple:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def snake_case_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] ) -> str:
if args.student_type == "roberta":
UpperCAmelCase : Any = False
elif args.student_type == "gpt2":
UpperCAmelCase : List[Any] = False
def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> str:
if args.student_type == "roberta":
UpperCAmelCase : List[Any] = False
def snake_case_ ( ) -> Union[str, Any]:
UpperCAmelCase : str = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=_lowerCAmelCase , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=_lowerCAmelCase , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=_lowerCAmelCase , type=_lowerCAmelCase , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=_lowerCAmelCase , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=_lowerCAmelCase , help='''Temperature applied to the softmax during distillation.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=_lowerCAmelCase , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=_lowerCAmelCase , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=_lowerCAmelCase , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=_lowerCAmelCase , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=_lowerCAmelCase , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.1_5 , type=_lowerCAmelCase , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=_lowerCAmelCase , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=_lowerCAmelCase , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=_lowerCAmelCase , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=_lowerCAmelCase , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=_lowerCAmelCase , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=_lowerCAmelCase , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=_lowerCAmelCase , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_lowerCAmelCase , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=_lowerCAmelCase , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_lowerCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5e-4 , type=_lowerCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=_lowerCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=_lowerCAmelCase , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.0_2 , type=_lowerCAmelCase , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=_lowerCAmelCase , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=_lowerCAmelCase , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=_lowerCAmelCase , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=_lowerCAmelCase , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=_lowerCAmelCase , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=_lowerCAmelCase , default=4000 , help='''Checkpoint interval.''' )
UpperCAmelCase : Optional[int] = parser.parse_args()
sanity_checks(_lowerCAmelCase )
# ARGS #
init_gpu_params(_lowerCAmelCase )
set_seed(_lowerCAmelCase )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(_lowerCAmelCase ) , _lowerCAmelCase , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase : List[Any] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase : str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase : int = tokenizer.all_special_tokens.index(_lowerCAmelCase )
UpperCAmelCase : List[Any] = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase : Any = special_tok_ids
UpperCAmelCase : List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase : str = pickle.load(_lowerCAmelCase )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase : List[str] = pickle.load(_lowerCAmelCase )
UpperCAmelCase : str = np.maximum(_lowerCAmelCase , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase : Optional[Any] = 0.0 # do not predict special tokens
UpperCAmelCase : str = torch.from_numpy(_lowerCAmelCase )
else:
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : List[str] = LmSeqsDataset(params=_lowerCAmelCase , data=_lowerCAmelCase )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase : Dict = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase : List[str] = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_lowerCAmelCase )
else:
UpperCAmelCase : Optional[Any] = student_model_class(_lowerCAmelCase )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_lowerCAmelCase )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_lowerCAmelCase , _lowerCAmelCase )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_lowerCAmelCase , _lowerCAmelCase )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase : Union[str, Any] = Distiller(
params=_lowerCAmelCase , dataset=_lowerCAmelCase , token_probs=_lowerCAmelCase , student=_lowerCAmelCase , teacher=_lowerCAmelCase )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
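The script above is driven by temperature-scaled knowledge distillation (the `--alpha_ce` and `--temperature` flags). A minimal hedged sketch of that core loss, written independently of the Distiller class:
import torch
import torch.nn.functional as F

def distillation_ce_loss(student_logits, teacher_logits, temperature=2.0):
    # KL divergence between temperature-softened distributions; the T^2 factor
    # keeps gradient magnitudes comparable across temperatures (Hinton et al.).
    t = temperature
    return F.kl_div(
        F.log_softmax(student_logits / t, dim=-1),
        F.softmax(teacher_logits / t, dim=-1),
        reduction="batchmean",
    ) * (t ** 2)

# Toy check on random logits of shape (batch, vocab); KL is non-negative.
loss = distillation_ce_loss(torch.randn(4, 100), torch.randn(4, 100))
assert loss.item() >= 0.0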
| 23
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowercase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __a ( _SCREAMING_SNAKE_CASE ) ->Any:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
a__: Optional[int] = [image]
a__: str = [trans(img.convert('RGB' ) ) for img in image]
a__: Any = torch.stack(_SCREAMING_SNAKE_CASE )
return image
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
a__: Dict = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=lowercase , scheduler=lowercase)
def lowerCamelCase_ ( self , lowercase) -> int:
'''simple docstring'''
if strength < 0 or strength > 1:
            raise ValueError(f'The value of strength should be in [0.0, 1.0] but is {strength}')
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: int = min(int(num_inference_steps * strength) , lowercase)
a__: Any = max(num_inference_steps - init_timestep , 0)
a__: Union[str, Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> List[Any]:
'''simple docstring'''
if not isinstance(lowercase , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase)}')
a__: Tuple = image.to(device=lowercase , dtype=lowercase)
if isinstance(lowercase , lowercase) and len(lowercase) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(lowercase)}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
a__: List[str] = init_latents.shape
a__: List[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase)
# get latents
print('add noise to latents at timestep' , lowercase)
a__: int = self.scheduler.add_noise(lowercase , lowercase , lowercase)
a__: Dict = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase = None , lowercase = 0.8 , lowercase = 1 , lowercase = None , lowercase = 0.0 , lowercase = 50 , lowercase = None , lowercase = "pil" , lowercase = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase)
# 2. Preprocess image
a__: Tuple = preprocess(lowercase)
# 3. set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device)
a__ , a__: Union[str, Any] = self.get_timesteps(lowercase , lowercase , self.device)
a__: Optional[int] = timesteps[:1].repeat(lowercase)
# 4. Prepare latent variables
a__: Union[str, Any] = self.prepare_latents(lowercase , lowercase , lowercase , self.unet.dtype , self.device , lowercase)
a__: Optional[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase):
# 1. predict noise model_output
a__: Dict = self.unet(lowercase , lowercase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__: Optional[Any] = self.scheduler.step(
lowercase , lowercase , lowercase , eta=lowercase , use_clipped_model_output=lowercase , generator=lowercase , ).prev_sample
a__: Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1)
a__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__: Dict = self.numpy_to_pil(lowercase)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase)
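The `strength` argument above decides where on the noise schedule the image enters the reverse process: strength 1.0 runs all timesteps, strength 0.3 only the final 30%. A hedged sketch of the timestep arithmetic mirroring get_timesteps:
def sliced_timesteps(timesteps, num_inference_steps, strength):
    # Keep only the tail of the schedule, proportional to strength.
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return timesteps[t_start:], num_inference_steps - t_start

ts, n = sliced_timesteps(list(range(50, 0, -1)), 50, 0.8)
assert n == 40 and len(ts) == 40  # strength 0.8 keeps the last 80% of the steps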
| 290
| 0
|
'''simple docstring'''
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any]=13 , __snake_case : Union[str, Any]=7 , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=True , __snake_case : List[Any]=True , __snake_case : List[str]=99 , __snake_case : int=32 , __snake_case : List[Any]=5 , __snake_case : Tuple=4 , __snake_case : Dict=4 , __snake_case : Tuple="gelu" , __snake_case : Optional[int]=0.0 , __snake_case : Tuple=0.1 , __snake_case : str=True , __snake_case : Tuple=5_12 , __snake_case : str=16 , __snake_case : Optional[int]=2 , __snake_case : Dict=0.02 , __snake_case : List[Any]=3 , __snake_case : Union[str, Any]=4 , __snake_case : Dict=None , )-> Dict:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_multiple_size
snake_case = hidden_act
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = weight_tying
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def lowerCAmelCase ( self : int )-> str:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : str )-> Any:
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
snake_case , snake_case , snake_case , snake_case = self.prepare_config_and_inputs()
snake_case = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : str , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] )-> List[Any]:
snake_case = GPTNeoXJapaneseModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[Any] )-> Optional[int]:
snake_case = True
snake_case = GPTNeoXJapaneseModel(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : int , __snake_case : str )-> Tuple:
snake_case = GPTNeoXJapaneseForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Optional[int] , __snake_case : Optional[Any] )-> Dict:
snake_case = True
snake_case = GPTNeoXJapaneseForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
snake_case = model(__snake_case , attention_mask=__snake_case , use_cache=__snake_case )
snake_case = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case = model(__snake_case , attention_mask=__snake_case , output_hidden_states=__snake_case )
snake_case = output_from_no_past["""hidden_states"""][0]
snake_case = model(
__snake_case , attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )["""hidden_states"""][0]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-3 ) )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
snake_case_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
snake_case = GPTNeoXJapaneseModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] )-> str:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] )-> Union[str, Any]:
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> Tuple:
# This regression test was failing with PyTorch < 1.3
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case = None
self.model_tester.create_and_check_model_as_decoder(__snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Dict:
snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__snake_case )
@slow
def lowerCAmelCase ( self : Optional[int] )-> Optional[Any]:
snake_case = """abeja/gpt-neox-japanese-2.7b"""
snake_case = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
snake_case = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
snake_case = GPTNeoXJapaneseTokenizer.from_pretrained(__snake_case )
snake_case = GPTNeoXJapaneseForCausalLM.from_pretrained(__snake_case )
snake_case = []
for prompt in prompts:
snake_case = tokenizer(__snake_case , return_tensors="""pt""" ).input_ids
snake_case = model.generate(__snake_case , max_length=50 )
snake_case = tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
predicted_outputs += generated_string
self.assertListEqual(__snake_case , __snake_case )
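The decoder-past test above checks the KV-cache invariant: decoding with cached past_key_values must match re-running the full concatenated sequence. A hedged generic sketch of that pattern (attention masks omitted for brevity; assumes a typical Hugging Face causal LM):
import torch

@torch.no_grad()
def check_kv_cache_equivalence(model, input_ids, next_tokens, atol=1e-3):
    # Path 1: run the whole concatenated sequence at once.
    full = torch.cat([input_ids, next_tokens], dim=-1)
    out_full = model(full, output_hidden_states=True).hidden_states[-1]

    # Path 2: cache the prefix, then feed only the new tokens.
    cached = model(input_ids, use_cache=True)
    out_inc = model(
        next_tokens, past_key_values=cached.past_key_values, output_hidden_states=True
    ).hidden_states[-1]

    # The last len(next_tokens) positions must agree between the two paths.
    return torch.allclose(out_full[:, -next_tokens.shape[1]:], out_inc, atol=atol)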
| 368
|
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
    # if not all vertices have been visited, pick an unvisited one and continue
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
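Note that the DFS above appends each vertex after all of its neighbors, so the result lists dependencies first (the reverse of the usual topological order). A hedged, de-obfuscated sketch of the same traversal:
def dfs_topological_sort(edges: dict[str, list[str]]) -> list[str]:
    # Post-order DFS: a vertex is appended only after everything reachable from it.
    visited, order = set(), []

    def dfs(vertex: str) -> None:
        visited.add(vertex)
        for neighbor in edges[vertex]:
            if neighbor not in visited:
                dfs(neighbor)
        order.append(vertex)

    for vertex in edges:
        if vertex not in visited:
            dfs(vertex)
    return order

edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
print(dfs_topological_sort(edges))  # ['c', 'd', 'e', 'b', 'a']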
| 3
| 0
|