"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a__ ( ) -> str:
__lowerCAmelCase: int = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=__SCREAMING_SNAKE_CASE , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=__SCREAMING_SNAKE_CASE , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
def a__ ( ) -> List[Any]:
__lowerCAmelCase: int = parse_args()
# Import training_script as a module.
__lowerCAmelCase: Any = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__lowerCAmelCase: str = script_fpath.stem
__lowerCAmelCase: List[Any] = importlib.import_module(__SCREAMING_SNAKE_CASE )
# Patch sys.argv
__lowerCAmelCase: Any = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
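# Example invocation, assuming a training script my_trainer.py that exposes the
# `_mp_fn(index)` entry point expected by torch_xla's spawn API (the script name
# and flags are illustrative placeholders):
#
#   python xla_spawn.py --num_cores 8 my_trainer.py --learning_rate 1e-4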
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__A = data_utils.TransfoXLTokenizer
__A = data_utils.TransfoXLCorpus
__A = data_utils
__A = data_utils
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__SCREAMING_SNAKE_CASE , "rb" ) as fp:
__lowerCAmelCase: Union[str, Any] = pickle.load(__SCREAMING_SNAKE_CASE , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__lowerCAmelCase: Optional[Any] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F"Save vocabulary to {pytorch_vocab_dump_path}" )
__lowerCAmelCase: Optional[Any] = corpus.vocab.__dict__
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__lowerCAmelCase: Optional[Any] = os.path.abspath(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = os.path.abspath(__SCREAMING_SNAKE_CASE )
print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__lowerCAmelCase: str = TransfoXLConfig()
else:
__lowerCAmelCase: List[str] = TransfoXLConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(F"Building PyTorch model from configuration: {config}" )
__lowerCAmelCase: int = TransfoXLLMHeadModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = load_tf_weights_in_transfo_xl(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
__lowerCAmelCase: List[Any] = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Any = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(F"Save PyTorch model to {os.path.abspath(__SCREAMING_SNAKE_CASE )}" )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
print(F"Save configuration file to {os.path.abspath(__SCREAMING_SNAKE_CASE )}" )
with open(__SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
__A = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
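# Example CLI invocation (the paths below are placeholders):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch \
#       --tf_checkpoint_path ./transfo_xl_checkpoint \
#       --transfo_xl_config_file ./transfo_xl_config.json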
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Given two of shear stress, tangential force and area (the third set to 0),
    solve tau = F / A for the missing quantity and return its name and value.
    """
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
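# Quick examples, computed from the function above (tau = F / A, solved for
# whichever argument is zero):
#
#   >>> shear_stress(stress=0, tangential_force=1600, area=200)
#   ('stress', 8.0)
#   >>> shear_stress(stress=25, tangential_force=100, area=0)
#   ('area', 4.0)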
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Approximate the square root of `a` with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
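# Example, computed from the code above (Newton's method converges
# quadratically, so a handful of iterations suffices):
#
#   >>> round(square_root_iterative(4), 6)
#   2.0
#   >>> round(square_root_iterative(3.2), 6)
#   1.788854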
"""simple docstring"""
def __A ( )-> int:
'''simple docstring'''
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
for b in range(_lowerCAmelCase , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
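# Sanity check: the unique triplet is (200, 375, 425), since 200**2 + 375**2 == 425**2
# and 200 + 375 + 425 == 1000, so solution() returns 31875000.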
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the JSON record for an Open Library ID (e.g. 'isbn/<number>')."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
"""Convert a SpeechT5 HiFi-GAN vocoder checkpoint to the Hugging Face format."""
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
from __future__ import annotations


class BoyerMooreSearch:
    """Bad-character heuristic of the Boyer-Moore string-search algorithm."""

    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch, or -1 for a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
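# Expected output for the demo above, since "AB" occurs at indices 0 and 3 of "ABAABA":
#
#   Pattern found in following positions:
#   [0, 3]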
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize the HumanEval prompts, yielding each one `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Stop generation once every sequence in the batch contains an EOF string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(code):
    """Remove the last (incomplete) block of code that starts with an EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), code)
    # last string should be ""
    return "".join(string_list[:-2])


def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple candidate completions for each HumanEval task."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
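# Typical invocation (the flag names below are inferred from the HumanEvalArguments
# fields referenced above, so treat them as assumptions rather than the exact CLI):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --do_sample True --temperature 0.2 --top_p 0.95 \
#       --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL 1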
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
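# Note: with this lazy-module pattern, `import transformers.models.bridgetower`
# stays cheap; the heavy torch/vision submodules listed in _import_structure are
# only imported when one of the exported names is first accessed.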
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ) -> Any:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def __A ( self ) -> Any:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __A ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class a ( _UpperCAmelCase, unittest.TestCase ):
"""simple docstring"""
A__ : List[str] = True
A__ : List[str] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __A ( self ) -> List[str]:
_UpperCAmelCase = FlaxRoFormerModelTester(self )
@slow
def __A ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=A_ )
_UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(A_ )
@require_flax
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __A ( self ) -> List[str]:
_UpperCAmelCase = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
_UpperCAmelCase = jnp.array([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(A_ )[0]
_UpperCAmelCase = 50000
_UpperCAmelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , A_ )
_UpperCAmelCase = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , A_ , atol=1e-4 ) )
"""simple docstring"""
def A__ ( A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def A__ ( A__ ) -> dict[str, str]:
'''simple docstring'''
_UpperCAmelCase = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
_UpperCAmelCase = remove_duplicates(key.upper() )
_UpperCAmelCase = len(A__ )
# First fill cipher with key characters
_UpperCAmelCase = {alphabet[i]: char for i, char in enumerate(A__ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(A__ ) , 26 ):
_UpperCAmelCase = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
_UpperCAmelCase = alphabet[i - offset]
_UpperCAmelCase = char
return cipher_alphabet
def A__ ( A__ , A__ ) -> str:
'''simple docstring'''
return "".join(cipher_map.get(A__ , A__ ) for ch in message.upper() )
def A__ ( A__ , A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(A__ , A__ ) for ch in message.upper() )
def A__ ( ) -> None:
'''simple docstring'''
_UpperCAmelCase = input("Enter message to encode or decode: " ).strip()
_UpperCAmelCase = input("Enter keyword: " ).strip()
_UpperCAmelCase = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
_UpperCAmelCase = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
_UpperCAmelCase = create_cipher_map(A__ )
print(func(A__ , A__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
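# Example round trip, computed from the functions above:
#
#   >>> cipher_map = create_cipher_map("Goodbye!!")
#   >>> encipher("Hello World!!", cipher_map)
#   'CYJJM VMQJB!!'
#   >>> decipher("CYJJM VMQJB!!", cipher_map)
#   'HELLO WORLD!!'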
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoise mel spectrogram images and convert them to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> torch.Tensor:
        """Reverse the DDIM denoising steps to recover the latent noise for `images`."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
from typing import List

import numpy as np


def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job, distributing them as evenly as possible."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most `max_num_jobs` gen_kwargs, one per job."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of gen_kwargs back into a single gen_kwargs dict."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the lists in gen_kwargs, keeping lists of equal length aligned."""
    # Lists of the same size get the same shuffling, so that entangled lists
    # (e.g. shards and their metadata) stay in the same relative order.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
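# A quick illustration of how shards are distributed and gen_kwargs split
# (values computed from the functions above):
#
#   >>> _distribute_shards(num_shards=5, max_num_jobs=2)
#   [range(0, 3), range(3, 5)]
#   >>> _split_gen_kwargs({"files": ["a", "b", "c"], "seed": 42}, max_num_jobs=3)
#   [{'files': ['a'], 'seed': 42}, {'files': ['b'], 'seed': 42}, {'files': ['c'], 'seed': 42}]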
"""simple docstring"""
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Optional[Any] = GPTSwaTokenizer
__magic_name__ :Union[str, Any] = False
__magic_name__ :Dict = True
__magic_name__ :List[Any] = False
def snake_case ( self ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ :List[Any] = GPTSwaTokenizer(__UpperCAmelCase , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = 'This is a test'
lowerCAmelCase__ :int = 'This is a test'
return input_text, output_text
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = '<s>'
lowerCAmelCase__ :int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(__UpperCAmelCase ) , 2_0_0_0 )
def snake_case ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Any = GPTSwaTokenizer(__UpperCAmelCase )
lowerCAmelCase__ :int = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
lowerCAmelCase__ :Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
__UpperCAmelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowerCAmelCase__ :Optional[int] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
lowerCAmelCase__ :List[str] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
# fmt: off
self.assertListEqual(
__UpperCAmelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = GPTSwaTokenizer(__UpperCAmelCase )
lowerCAmelCase__ :Any = ['This is a test', 'I was born in 92000, and this is falsé.']
lowerCAmelCase__ :Optional[int] = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(__UpperCAmelCase ) , __UpperCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(tokenizer.decode_fast(__UpperCAmelCase ) , __UpperCAmelCase )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowerCAmelCase__ :Tuple = {'input_ids': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='AI-Sweden/gpt-sw3-126m' , sequences=__UpperCAmelCase , )
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
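# Example usage (a minimal sketch, not part of the test suite above; it assumes
# the public `diffusers` AudioLDM API and the `cvssp/audioldm` checkpoint used
# in the slow tests):
#
#     import torch
#     from diffusers import AudioLDMPipeline
#
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#     pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
#     audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]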
| 560
| 1
|
from manim import *


class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)

            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
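# To render this scene (a sketch; assumes the community `manim` package is
# installed, and the filename `stage_2.py` is an assumption, not part of the
# original):
#
#     manim -pql stage_2.py Stage2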
| 300
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        # Warm up the cache so that the download is not counted in later timing-sensitive tests.
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # BLEU should improve over training
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 51
| 0
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights, with float("inf") for missing edges
    :param v: number of vertices
    :return: (dist, v) where dist[i][j] is the shortest distance from i to j
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = int(input('Enter number of vertices: '))
SCREAMING_SNAKE_CASE__ = int(input('Enter number of edges: '))
SCREAMING_SNAKE_CASE__ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
SCREAMING_SNAKE_CASE__ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
SCREAMING_SNAKE_CASE__ = int(input('Enter source:'))
SCREAMING_SNAKE_CASE__ = int(input('Enter destination:'))
SCREAMING_SNAKE_CASE__ = float(input('Enter weight:'))
SCREAMING_SNAKE_CASE__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
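# A non-interactive sketch of the same call, using the adjacency-matrix
# convention from above (float("inf") marks a missing edge):
#
#     INF = float("inf")
#     example_graph = [
#         [0.0, 2.0, INF],
#         [1.0, 0.0, INF],
#         [INF, INF, 0.0],
#     ]
#     shortest_paths, _ = floyd_warshall(example_graph, 3)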
| 717
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["Accelerate config"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
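# Typical invocations (a sketch; this module backs the `accelerate env`
# subcommand, and the config path below is only illustrative):
#
#     accelerate env
#     accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml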
| 35
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
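# Example (a minimal sketch; assumes a `transformers` version that ships DETA):
#
#     config = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)
#     # `attribute_map` redirects these to `encoder_attention_heads` / `d_model`
#     print(config.num_attention_heads, config.hidden_size)  # 8 256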
| 550
|
def solution(n: int = 1000) -> int:
    """
    Returns the sum over 3 <= a <= n of r_max = 2 * a * ((a - 1) // 2), the
    maximum remainder when (a - 1)**k + (a + 1)**k is divided by a**2.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
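# Sanity check (a sketch): the closed form 2 * a * ((a - 1) // 2) equals the
# maximum of (a - 1)**k + (a + 1)**k modulo a**2 over k, which can be verified
# by brute force for small a:
#
#     def r_max_brute_force(a: int) -> int:
#         return max(((a - 1) ** k + (a + 1) ** k) % a**2 for k in range(1, 2 * a + 1))
#
#     assert all(r_max_brute_force(a) == 2 * a * ((a - 1) // 2) for a in range(3, 50))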
| 550
| 1
|
"""simple docstring"""
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : Tuple = 0
for i in range(1 ,1_001 ):
total += i**i
return str(_lowercase )[-10:]
if __name__ == "__main__":
print(solution())
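# An equivalent modular form (a sketch) that avoids building the full big
# integer by reducing each term mod 10**10:
#
#     MOD = 10**10
#     assert solution() == str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10)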
| 712
|
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = set()
# edges = list of graph's edges
_UpperCamelCase : Union[str, Any] = get_edges(lowercase_ )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
_UpperCamelCase, _UpperCamelCase : str = edges.pop()
chosen_vertices.add(lowercase_ )
chosen_vertices.add(lowercase_ )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowercase_ )
return chosen_vertices
def lowercase__ ( lowercase_ ) -> set:
"""simple docstring"""
_UpperCamelCase : List[str] = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
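# Example usage (a sketch, using the commented graph above): the returned set
# touches every edge, i.e. it is a (not necessarily minimum) vertex cover.
#
#     graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
#     cover = matching_min_vertex_cover(graph)
#     assert all(u in cover or v in cover for u, v in get_edges(graph))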
| 51
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256_047,
        16_297,
        134_408,
        8_165,
        248_066,
        14_734,
        950,
        1_135,
        105_721,
        3_573,
        83,
        27_352,
        108,
        49_486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
| 667
|
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes over odd numbers: return the list of primes below ``n``."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers that do not exceed ``limit``."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 500
| 0
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                field_type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=field_type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
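# Example usage (a sketch): this builder is what `datasets.load_dataset`
# dispatches to for JSON / JSON Lines files; the file names are illustrative.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("json", data_files={"train": "train.jsonl"})
#     ds_field = load_dataset("json", data_files="nested.json", field="data")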
| 711
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 699
| 0
|
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a list of integers sorted in ascending order, return the indices of
    two numbers that add up to ``target``, using the two-pointer technique.

    >>> two_pointer([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_pointer([2, 7, 11, 15], 26)
    [2, 3]
    >>> two_pointer([2, 7, 11, 15], 8)
    []
    """
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 32
|
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
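# Typical invocation (a sketch; the script name and argument values are
# illustrative -- the accepted flags are defined by `InitializationArguments`):
#
#     python initialize_model.py \
#         --config_name gpt2-large \
#         --tokenizer_name codeparrot/codeparrot \
#         --model_name codeparrot-model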
| 401
| 0
|
import copy
import random

from transformers import CLIPTokenizer


class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
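# Example usage (a sketch; assumes a CLIP checkpoint with a tokenizer, and the
# placeholder string below is illustrative):
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#     tokenizer.add_placeholder_tokens("<my-concept>", num_vec_per_token=4)
#     ids = tokenizer.encode("A photo of <my-concept>", vector_shuffle=True)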
| 416
|
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase : Optional[int] = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e"
        )
| 416
| 1
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
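    # Quick hand-checked examples (a small added sketch, not part of the original):
    # 360 = 2^3 * 3^2 * 5, and a prime is its own factorization.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(97) == [97]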
| 386
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# silence advisory warnings from transformers during the test run
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"

def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed for a basic regression setup."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        ddp_input, ddp_target = batch.values()
        with torch.no_grad():
            logit = model(ddp_input)
        logit, target = accelerator.gather_for_metrics((logit, ddp_target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
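# Illustrative launch command (an assumption, not part of the original script):
# the distributed code paths above only trigger when more than one process is
# spawned, e.g. on a two-GPU host:
#   accelerate launch --num_processes 2 test_metrics.py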
| 73
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
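# Minimal usage sketch (added illustration; the parameter values are arbitrary):
#
#   config = RoFormerConfig(num_hidden_layers=6, rotary_value=True)
#   onnx_config = RoFormerOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict of dynamic-axis mappings per input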
| 715
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
)
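# Example invocation (illustrative paths only, not from the original script):
#   python convert_t5x_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./converted-model \
#     --scalable_attention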
| 286
| 0
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that"
        ' "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the'
        " violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
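# The integration tests above download "facebook/nllb-200-distilled-600M" from
# the Hub; a typical local run (assuming network access and the usual
# transformers test layout) would be:
#   python -m pytest tests/models/nllb/test_tokenization_nllb.py -k Integration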
| 583
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
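# Migration sketch (added illustration; the checkpoint name is the public Donut
# base model and the alias above only forwards to DonutImageProcessor):
#   from transformers import DonutImageProcessor
#   processor = DonutImageProcessor.from_pretrained("naver-clova-ix/donut-base")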
| 583
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Check if a number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Add three fractions x, y, z and return the reduced numerator and denominator."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Sum the unique reduced triples found for each exponent case and return
    numerator + denominator of the total."""
    unique_s: set = set()
    total: Fraction = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
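# Note on cost (added remark): the quadruple loop over (x_num, x_den, y_num,
# y_den) performs on the order of order**4 candidate checks; at the default
# order=35 that is 35**4 ≈ 1.5 million iterations, each doing a few gcd calls.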
| 412
|
import operator as op
def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
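# Worked example (added illustration): evaluating the postfix form of 5 + 6 * 9.
#   solve(["5", "6", "9", "*", "+"])
# pops 9 and 6 and pushes 54, then pops 54 and 5 and pushes 59, so it returns 59.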
| 412
| 1
|
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all contiguous character n-grams of the given size.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 86
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override this method in concrete algorithms.
    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1


if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
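# For the sample 4-vertex graph above the expected output is "maximum flow is 6":
# the only 0 -> 3 path is 0 -> 1 -> 2 -> 3, and the 6-capacity edge 1 -> 2 is
# the bottleneck (and the min cut), so push-relabel saturates it at 6 units.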
| 86
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}

class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        activation_dropout=0.0,
        attention_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
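# Minimal export sketch (added illustration, assuming the onnx extras are
# installed; the checkpoint name is the one from the archive map above):
#
#   config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   print(list(onnx_config.inputs.keys()))  # encoder + decoder input axes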
| 506
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool(string: str) -> bool:
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
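# Example invocation (illustrative only; the script filename and all paths are
# placeholders, not part of the original file):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny \
#       --to_safetensors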
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
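# Minimal usage sketch (not from the original module): swapping the linears of a
# toy model for 8-bit bitsandbytes layers. This assumes a CUDA-enabled
# `bitsandbytes` install and that `BitsAndBytesConfig` (the public quantization
# config providing `quantization_method()` and the `llm_int8_*` fields used above)
# is passed as `quantization_config`.
#
#   import torch.nn as nn
#   from transformers import BitsAndBytesConfig
#
#   toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   toy = replace_with_bnb_linear(toy, quantization_config=bnb_config)
#   print(toy)  # the Linear layers are now bnb.nn.Linear8bitLt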
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then
    # check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
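# Usage sketch (assumes a `transformers` checkpoint can be loaded): the returned
# names are the modules kept in full precision during quantization, typically the
# output head plus anything tied to the input embeddings.
#
#   from transformers import AutoModelForCausalLM
#
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   print(get_keys_to_not_convert(model))  # e.g. ["lm_head"]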
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            " --mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configs_path = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configs_path)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
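# Minimal usage sketch (mirrors how `transformers` model test suites use this
# helper; `BertConfig` and the kwargs are illustrative, not from this file):
#
#   import unittest
#   from transformers import BertConfig
#
#   class BertConfigTest(unittest.TestCase):
#       def test_config(self):
#           ConfigTester(self, config_class=BertConfig, hidden_size=37).run_common_tests()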
def binary_recursive(decimal: int) -> str:
    """
    Take a positive integer value and return its binary equivalent.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Take an integer value, raise ValueError for wrong inputs and call the
    recursive helper above, prefixing the output with "0b" (or "-0b" for
    negative integers).
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
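# Worked example of the recursion above (illustrative): for 10, divmod(.., 2)
# yields 10 -> (5, 0), 5 -> (2, 1), 2 -> (1, 0), and the base case returns "1",
# so the digits assemble to "1010".
#
#   >>> main("10")
#   '0b1010'
#   >>> main("-10")
#   '-0b1010'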
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # push any pending lazy value down before recursing
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # push any pending lazy value down before recursing
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
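# A small self-contained illustration of the slicing used in `convert_state_dict`
# above: a fused qkv matrix of shape (3 * dim, dim) is cut into three equal
# (dim, dim) blocks for the separate q/k/v projections. Defined for illustration
# only and never called by the conversion flow; the shapes are made up.
def _demo_qkv_split():
    dim = 4
    qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)
    return q, k, v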
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """
    Copy/paste/tweak model's weights to the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="groupvit-gcc-yfcc",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
args = parser.parse_args()
convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
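# Illustration (not part of the original check): what `_re_checkpoint` extracts
# from a config docstring. The snippet below is made up for the example.
#
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]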
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
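# Usage sketch (assumed, not part of the original hubconf): these functions are
# torch.hub entry points, so the usual call path is torch.hub.load against the
# repo that ships this file; the repo string below is the historical one and may
# differ for other forks.
#
#   import torch
#
#   tok = torch.hub.load("huggingface/pytorch-transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/pytorch-transformers", "model", "bert-base-uncased")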
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
__SCREAMING_SNAKE_CASE : List[str] = parse_flag_from_env('RUN_SLOW', default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; it runs only when RUN_SLOW=yes is set."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
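# True when wandb or tensorboard is usable and comet_ml is absent (comet_ml hijacks runs when installed).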
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        # Empty the temp dir between tests when requested.
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """Gather `tensor` across processes and check every copy matches the local one."""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
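# Example (hypothetical): in a distributed test, are_the_same_tensors(torch.tensor([1.0, 2.0]))
# returns True only if every process holds identical values.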
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
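# Example (hypothetical): execute_subprocess_async(["python", "-m", "pytest", "tests/test_cli.py"], env=os.environ.copy())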
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    """Run `command` with subprocess.check_output; optionally return its decoded stdout."""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 580
| 1
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    # ways_number[n] counts the fillings of a row of length n with red blocks
    # of length >= 3 separated by at least one black square.
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
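    # For the default length of 50 this prints 16475640049 (Project Euler 114).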
| 490
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    """Image processor with ConvNeXT-style resizing: crop_pct-based resize-and-crop below 384, plain resize at 384+."""

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize=True,
        size=None,
        crop_pct=None,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size,
        crop_pct,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(self, image: np.ndarray, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize=None,
        size=None,
        crop_pct=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
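# Example usage (hypothetical):
#   processor = ConvNextImageProcessor(size={"shortest_edge": 384})
#   batch = processor(images=[pil_image], return_tensors="pt")  # BatchFeature holding "pixel_values"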
| 314
| 0
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
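# bleu_data maps a language pair such as "en-ru" to its "src" and "tgt" sentence lists.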
@require_torch
class FSMTBleuScoreTests(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['''en-ru''', 26.0],
['''ru-en''', 22.0],
['''en-de''', 22.0],
['''de-en''', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 191
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        # A tiny WordPiece vocabulary that covers the strings tokenized below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 191
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
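# Each *Command class attaches its own subparser via register_subcommand, so main() only dispatches.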
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 28
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP HEAD request to emulate the server being down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_image_processor_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )
    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 28
| 1
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
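# The hard-coded sums/means above serve as regression targets for the deterministic PRK + PLMS loop.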
| 720
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
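    # Pieces missing from the toy vocabulary collapse to the <unk> id, which is why
    # input_bpe_tokens above repeats the same id for every unknown piece.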
| 294
| 0
|
"""simple docstring"""
from math import isclose, sqrt
def next_point(point_x, point_y, incoming_gradient):
    """Reflect the beam at (point_x, point_y) and return the next hit point and gradient."""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient
def solution(first_x_coord=1.4, first_y_coord=-9.6):
    """Count how many times the beam reflects before escaping through the top gap."""
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
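    # The default beam bounces 354 times before escaping (Project Euler 144).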
| 277
|
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 277
| 1
|
'''simple docstring'''
def merge_sort(collection: list) -> list:
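    """Sort a list in ascending order with a top-down merge sort.

    >>> merge_sort([5, 2, 4, 1])
    [1, 2, 4, 5]
    >>> merge_sort([])
    []
    """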
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 355
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
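# evaluate() interprets a restricted subset of Python; the tests below exercise assignments,
# calls, conditionals, loops, and subscripts against an explicit `state` dict.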
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 355
| 1
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until we have enough candidate images to download from.
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
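    # Example invocation (hypothetical script name):
    #   python retrieve_real_images.py --class_prompt "photo of a dog" --class_data_dir ./real_reg --num_class_images 200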
| 568
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
# Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case_ : Optional[Any] = image_processor.size["shortest_edge"]
else:
snake_case_ : str = (image_processor.size["height"], image_processor.size["width"])
snake_case_ : Optional[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
snake_case_ : Union[str, Any] = Compose(
[
RandomResizedCrop(_a ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case_ : List[Any] = Compose(
[
Resize(_a ),
CenterCrop(_a ),
ToTensor(),
normalize,
] )
def train_transforms(_a : Optional[int] ):
snake_case_ : List[str] = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(_a : List[Any] ):
snake_case_ : int = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
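# Example invocation (hypothetical):
#   python run_image_classification.py --dataset_name beans --output_dir ./vit-beans --do_train --do_eval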
| 568
| 1
|
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
A_ : Optional[Any] = DanceDiffusionPipeline
A_ : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
A_ : Any = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
A_ : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
A_ : List[str] = False
A_ : str = False
def _A ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase_ , use_timestep_embedding=lowerCAmelCase_ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
lowerCAmelCase__ : Dict = IPNDMScheduler()
lowerCAmelCase__ : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
}
return components
def _A ( self : Optional[int] , a__ : Dict , a__ : List[Any]=0 ):
'''simple docstring'''
if str(lowerCAmelCase_ ).startswith("mps" ):
lowerCAmelCase__ : Dict = torch.manual_seed(lowerCAmelCase_ )
else:
lowerCAmelCase__ : str = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
lowerCAmelCase__ : int = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : List[Any] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline(**lowerCAmelCase_ )
lowerCAmelCase__ : Optional[int] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCAmelCase__ : Any = self.get_dummy_inputs(lowerCAmelCase_ )
lowerCAmelCase__ : Optional[int] = pipe(**lowerCAmelCase_ )
lowerCAmelCase__ : Any = output.audios
lowerCAmelCase__ : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : Any = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _A ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _A ( self : int ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def _A ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _A ( self : int ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _A ( self : int ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
def _A ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = torch_device
lowerCAmelCase__ : int = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
lowerCAmelCase__ : List[Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCAmelCase__ : int = torch.manual_seed(0 )
lowerCAmelCase__ : str = pipe(generator=lowerCAmelCase_ , num_inference_steps=100 , audio_length_in_s=4.096 )
lowerCAmelCase__ : Tuple = output.audios
lowerCAmelCase__ : Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = torch_device
lowerCAmelCase__ : Dict = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = pipe(generator=lowerCAmelCase_ , num_inference_steps=100 , audio_length_in_s=4.096 )
lowerCAmelCase__ : Optional[Any] = output.audios
lowerCAmelCase__ : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : str = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 712
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
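# Added derivation sketch: an a-by-b grid contains T(a) * T(b) sub-rectangles,
# where T(n) = n(n+1)/2 is the n-th triangle number. Fixing T(a) and solving
# T(a) * b(b+1)/2 = target, i.e. b^2 + b - 2*target/T(a) = 0, for the positive
# root gives b = (-1 + sqrt(1 + 8*target/T(a))) / 2, the estimate used below.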
def solution(target = 2_0_0_0_0_0_0 ):
    """simple docstring"""
    triangle_numbers : list[int] = [0]
    idx : int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product : int = 0
    # the area corresponding to the grid that gives the product closest to target
    area : int = 0
    # an estimate of b, using the quadratic formula
    b_estimate : float
    # the largest integer less than b_estimate
    b_floor : int
    # the smallest integer greater than b_estimate
    b_ceil : int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess : int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess : int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f'{solution() = }')
| 568
| 0
|
def mf_knapsack(i , wt , val , j ):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
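# Added note: both solvers realise the classic 0/1-knapsack recurrence
#   f[i][j] = f[i-1][j]                                        if wt[i-1] > j
#   f[i][j] = max(f[i-1][j], f[i-1][j - wt[i-1]] + val[i-1])   otherwise
# mf_knapsack fills f top-down on demand; knapsack below fills it bottom-up.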
def knapsack(w , wt , val , n ):
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w : int , wt : list , val : list ):
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'But got {num_items} weights and {len(val )} values'
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set : set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp : list , wt : list , i : int , j : int , optimal_set : set ):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution , _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution , optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('''optimal_value = ''', optimal_solution)
print('''An optimal subset corresponding to the optimal value''', optimal_subset)
| 335
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : List[str] = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''lxmert'''
UpperCAmelCase__ : Any = {}
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_attention_heads=12 , num_qa_labels=95_00 , num_object_labels=16_00 , num_attr_labels=4_00 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=20_48 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs ,):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs )
| 335
| 1
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
_lowerCAmelCase = "3.0.12"
_lowerCAmelCase = None
def _lowerCAmelCase ( ) ->str:
"""simple docstring"""
global _logger
lowercase__ = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError ):
    """simple docstring"""
    def __init__(self , lock_file ):
        self.lock_file = lock_file
        return None
    def __str__(self ):
        temp = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__(self , lock ):
        self.lock = lock
        return None
    def __enter__(self ):
        return self.lock
    def __exit__(self , exc_type , exc_value , traceback ):
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 2_5_5
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self ):
        return self._lock_file
    @property
    def timeout(self ):
        return self._timeout
    @timeout.setter
    def timeout(self , value ):
        self._timeout = float(value )
        return None
    def _acquire(self ):
        raise NotImplementedError()
    def _release(self ):
        raise NotImplementedError()
    @property
    def is_locked(self ):
        return self._lock_file_fd is not None
    def acquire(self , timeout=None , poll_intervall=0.0_5 ):
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release(self , force=False ):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
        return None
    def __enter__(self ):
        self.acquire()
        return self
    def __exit__(self , exc_type , exc_value , traceback ):
        self.release()
        return None
    def __del__(self ):
        self.release(force=True )
        return None
    def hash_filename_if_too_long(self , path , max_length ):
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname , filename )
        else:
            return path
class WindowsFileLock(BaseFileLock ):
    """simple docstring"""
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
    def _acquire(self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release(self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock ):
    """simple docstring"""
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire(self ):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release(self ):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock(BaseFileLock ):
    """simple docstring"""
    def _acquire(self ):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self ):
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 718
|
'''simple docstring'''
def multiplicative_persistence(num : int ) ->int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''multiplicative_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''multiplicative_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence(num : int ) ->int:
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''additive_persistence() only accepts integral values''' )
    if num < 0:
        raise ValueError('''additive_persistence() does not accept negative values''' )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
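# Worked example (added): for 39 the multiplicative persistence is 3
# (39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4) and the additive persistence is 2
# (39 -> 3+9 = 12 -> 1+2 = 3).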
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 0
|
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
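# Added note: the value above is the (biased) within-class scatter
# S_w = (1/N) * sum_i X_i X_i^T, where X_i is the column-centred data of class i
# and N = features.shape[1] is the total number of samples.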
def covariance_between_classes(features : np.ndarray , labels : np.ndarray , classes : int ) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features : np.ndarray , dimensions : int ) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _ , eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
def linear_discriminant_analysis(features : np.ndarray , labels : np.ndarray , classes : int , dimensions : int ) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _ , eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix , _ , _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError
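# Added note: scipy's eigh(A, B) above solves the generalized eigenvalue problem
# A v = lambda B v, here S_between v = lambda S_within v -- the Fisher criterion
# whose leading eigenvectors give the LDA projection directions.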
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 289
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file ):
    '''simple docstring'''
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f'{test_file} instead.' )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
    components = components[:-1] + [test_fn.replace(".py" , "" )]
    test_module_path = ".".join(components )
    return test_module_path
def get_test_module(test_file ):
    '''simple docstring'''
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file ):
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes(test_file ):
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , "all_model_classes" , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes(test_file ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class(test_class ):
    '''simple docstring'''
    test = test_class()
    if hasattr(test , "setUp" ):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping(test_file ):
    '''simple docstring'''
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file ):
    '''simple docstring'''
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file ):
    '''simple docstring'''
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o ):
    '''simple docstring'''
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
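# Illustrative usage (added; the path is hypothetical and must point into a real
# transformers checkout so that get_test_module can import it):
#
#     mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#     print(to_json(mapping))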
| 289
| 1
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase:
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=3 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ) -> Dict:
"""simple docstring"""
lowercase__ : Tuple = parent
lowercase__ : Optional[Any] = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Any = is_training
lowercase__ : Any = use_input_mask
lowercase__ : Union[str, Any] = use_token_type_ids
lowercase__ : Any = use_labels
lowercase__ : Optional[Any] = vocab_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Optional[Any] = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : List[Any] = type_vocab_size
lowercase__ : int = type_sequence_label_size
lowercase__ : Tuple = initializer_range
lowercase__ : Optional[int] = num_labels
lowercase__ : List[Any] = num_choices
lowercase__ : Any = scope
def __a ( self ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = None
if self.use_input_mask:
lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
lowercase__ : Any = None
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self ) -> Optional[int]:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=lowerCamelCase , )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
"""simple docstring"""
lowercase__ : int = FalconModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : int = model(lowerCamelCase , attention_mask=lowerCamelCase )
lowercase__ : int = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> str:
"""simple docstring"""
lowercase__ : Optional[Any] = True
lowercase__ : Dict = FalconModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
lowercase__ : List[Any] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
lowercase__ : Union[str, Any] = model(lowerCamelCase , attention_mask=lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
lowercase__ : Optional[int] = FalconForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : str = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) -> Tuple:
"""simple docstring"""
lowercase__ : Dict = True
lowercase__ : int = True
lowercase__ : Optional[Any] = FalconForCausalLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
# first forward pass
lowercase__ : Tuple = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
lowercase__ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase__ : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
lowercase__ : Optional[int] = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
lowercase__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ : int = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) : Optional[Any] = config_and_inputs
lowercase__ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
a : Dict = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
a : Union[str, Any] = (FalconForCausalLM,) if is_torch_available() else ()
a : Dict = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Optional[int] = False
a : Optional[Any] = False
def __a ( self ) -> List[Any]:
"""simple docstring"""
lowercase__ : List[Any] = FalconModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def __a ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ , *lowercase__ : str = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowercase__ : Tuple = alibi
self.model_tester.create_and_check_model(lowerCamelCase , *lowerCamelCase )
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : str = input_dict["input_ids"]
lowercase__ : Dict = input_ids.ne(1 ).to(lowerCamelCase )
lowercase__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ : str = FalconForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : Union[str, Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = 3
lowercase__ : Union[str, Any] = "single_label_classification"
lowercase__ : str = input_dict["input_ids"]
lowercase__ : Optional[Any] = input_ids.ne(1 ).to(lowerCamelCase )
lowercase__ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase__ : Union[str, Any] = FalconForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : Any = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = input_dict["input_ids"]
lowercase__ : List[Any] = FalconForCausalLM(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : Union[str, Any] = model(lowerCamelCase , use_cache=lowerCamelCase )
lowercase__ : str = input_ids.shape[0]
lowercase__ : List[Any] = model._convert_to_rw_cache(result.past_key_values )
lowercase__ : Union[str, Any] = model._convert_cache_to_standard_format(lowerCamelCase , lowerCamelCase )
for layer in range(len(lowerCamelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __a ( self ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = 3
lowercase__ : Optional[Any] = "multi_label_classification"
lowercase__ : Dict = input_dict["input_ids"]
lowercase__ : List[str] = input_ids.ne(1 ).to(lowerCamelCase )
lowercase__ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase__ : List[Any] = FalconForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ : int = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __a ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class in self.all_generative_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(lowerCamelCase , "use_cache" ):
return
lowercase__ : str = model_class(lowerCamelCase ).to(lowerCamelCase )
if "use_cache" not in inputs:
lowercase__ : Dict = True
lowercase__ : Dict = model(**lowerCamelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowercase__ : Tuple = (
getattr(lowerCamelCase , "decoder_layers" , lowerCamelCase )
or getattr(lowerCamelCase , "num_decoder_layers" , lowerCamelCase )
or config.num_hidden_layers
)
lowercase__ : int = getattr(lowerCamelCase , "num_kv_heads" , config.num_attention_heads )
lowercase__ : Optional[int] = getattr(lowerCamelCase , "d_model" , config.hidden_size )
lowercase__ : List[Any] = embed_dim // num_attention_heads
lowercase__ : Any = outputs["past_key_values"]
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
lowercase__ , lowercase__ : List[str] = inputs["input_ids"].shape
for i in range(lowerCamelCase ):
if config.new_decoder_architecture:
lowercase__ : Any = config.num_attention_heads
elif config.multi_query:
lowercase__ : str = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : List[Any] = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(lowerCamelCase )
lowercase__ : str = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCamelCase )
lowercase__ : Optional[int] = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
lowercase__ : Optional[int] = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=19 )
lowercase__ : Dict = tokenizer.batch_decode(lowerCamelCase )[0]
self.assertEqual(lowerCamelCase , lowerCamelCase )
@slow
def __a ( self ) -> Tuple:
"""simple docstring"""
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
lowercase__ : str = FalconForCausalLM.from_pretrained(lowerCamelCase )
model.eval()
model.to(lowerCamelCase )
lowercase__ : Dict = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCamelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=4 )
model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=4 )
model.generate(**lowerCamelCase , num_beams=2 , max_new_tokens=4 )
@slow
def __a ( self ) -> str:
"""simple docstring"""
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase )
lowercase__ : List[Any] = FalconForCausalLM.from_pretrained(lowerCamelCase )
model.eval()
model.to(device=lowerCamelCase )
lowercase__ : Any = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCamelCase )
# Test results are the same with and without cache
lowercase__ : List[Any] = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 , use_cache=lowerCamelCase )
lowercase__ : str = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 , use_cache=lowerCamelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 298
|
from __future__ import annotations
def prime_sieve(limit ) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,limit ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling = 1_00_00_00 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length ,len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
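# Added note: this is Project Euler problem 50 -- find the prime below the
# ceiling expressible as the sum of the most consecutive primes. For 1_000_000
# the known answer is 997651, a run of 543 consecutive primes starting at 7.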
if __name__ == "__main__":
print(f'{solution() = }')
| 298
| 1
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
__SCREAMING_SNAKE_CASE ="https://www.google.com/search?q=" + " ".join(sys.argv[1:])
__SCREAMING_SNAKE_CASE =requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(1_0000):
out_file.write(data)
__SCREAMING_SNAKE_CASE =BeautifulSoup(res.text, "html.parser")
__SCREAMING_SNAKE_CASE =list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F"https://google.com{link.get('href')}")
| 425
|
"""simple docstring"""
from __future__ import annotations
def carrier_concentration( electron_conc : float , hole_conc : float , intrinsic_conc : float , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
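# Worked example (added): by the mass-action law n * p = n_i^2, calling the
# function with electron_conc=1e17, hole_conc=0, intrinsic_conc=1.5e10 returns
# ("hole_conc", (1.5e10) ** 2 / 1e17) == ("hole_conc", 2250.0).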
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425
| 1
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int ) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    dfs(1 )
if __name__ == "__main__":
    number_of_nodes , number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
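# Added note: for the sample tree above the program prints 2 -- the even-sized
# subtrees rooted at nodes 3 and 6 can each be detached by removing one edge.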
| 509
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class A_ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase_ ( self ) -> Any:
a , a : str = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
a : List[Any] = 'A painting of a squirrel eating a burger'
a : Union[str, Any] = jax.device_count()
a : Optional[int] = num_samples * [prompt]
a : Tuple = sd_pipe.prepare_inputs(__UpperCAmelCase )
a : Dict = replicate(__UpperCAmelCase )
a : int = shard(__UpperCAmelCase )
a : str = jax.random.PRNGKey(0 )
a : List[Any] = jax.random.split(__UpperCAmelCase , jax.device_count() )
a : Optional[int] = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=25 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
a : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a : Optional[Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a : str = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowercase_ ( self ) -> Union[str, Any]:
a : str = 'stabilityai/stable-diffusion-2'
a , a : Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__UpperCAmelCase , subfolder='scheduler' )
a , a : Any = FlaxStableDiffusionPipeline.from_pretrained(
__UpperCAmelCase , scheduler=__UpperCAmelCase , revision='bf16' , dtype=jnp.bfloataa , )
a : Union[str, Any] = scheduler_params
a : Any = 'A painting of a squirrel eating a burger'
a : Any = jax.device_count()
a : str = num_samples * [prompt]
a : Optional[Any] = sd_pipe.prepare_inputs(__UpperCAmelCase )
a : Optional[Any] = replicate(__UpperCAmelCase )
a : Union[str, Any] = shard(__UpperCAmelCase )
a : Optional[int] = jax.random.PRNGKey(0 )
a : str = jax.random.split(__UpperCAmelCase , jax.device_count() )
a : Tuple = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=25 , jit=__UpperCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
a : Optional[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
a : List[str] = images[0, 2_53:2_56, 2_53:2_56, -1]
a : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a : Tuple = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 509
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 286
|
'''simple docstring'''
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = BertTokenizer
A__ = BertTokenizerFast
A__ = True
A__ = True
A__ = filter_non_english
def A_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
super().setUp()
__snake_case : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = 'UNwant\u00E9d,running'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])

        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ['a', "'", 'll', '!', '!', 'to', '?', "'", 'd', 'of', ',', 'can', "'", 't', '.']
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')

        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
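        # Note on the assertions above: WordPiece greedily matches the longest
        # vocab prefix, so 'unwanted' splits into 'un' + '##want' + '##ed',
        # while 'unwantedX' cannot be fully segmented and the whole word falls
        # back to the single '[UNK]' token.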
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'solver_type': 'bh2',
        }

        config.update(**kwargs)
        return config
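    # For example, get_scheduler_config(solver_order=3, solver_type='bh1') keeps
    # the defaults above but overrides those two keys; the check_over_configs
    # calls below rely on this merge behaviour.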
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, 'Scheduler outputs are not identical'
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, 'Scheduler outputs are not identical'
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ['bh1', 'bh2']:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ['epsilon', 'sample']:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ['bh1', 'bh2']:
            for order in [1, 2, 3]:
                for prediction_type in ['epsilon', 'sample']:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), 'Samples have nan numbers'

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
        return re.sub(regex, ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
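
# Worked example: normalize_answer('The Quick, Brown Fox!') returns
# 'quick brown fox' -- lowercasing, punctuation removal, article removal and
# whitespace collapsing are applied innermost-first in that order.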
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
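
# Example: compute_em(['a cat'], [['A cat!']]) == 100.0, since both sides
# normalize to 'cat'; each prediction scores 1 if it exactly matches any of
# its normalized references, and the mean is rescaled to 0-100.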
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramsall)
    addgramcounterall = set(rgramsall) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
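
# SARIngram returns a (keep-F1, delete-precision, add-F1) triple for a single
# n-gram order; SARIsent below computes it for 1- to 4-grams, averages each
# component across the four orders, then averages the three components.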
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(' ')
    c1grams = csent.split(' ')
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(' ')
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + ' ' + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + ' ' + r1grams[i + 1] + ' ' + r1grams[i + 2] + ' ' + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + ' ' + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + ' ' + s1grams[i + 1] + ' ' + s1grams[i + 2] + ' ' + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + ' ' + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + ' ' + c1grams[i + 1] + ' ' + c1grams[i + 2] + ' ' + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = '13a', return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7

    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ['13a', 'intl']:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == 'moses':
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == 'penn':
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
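
# Illustration: normalize('About 95 species are currently accepted.') lowercases
# the sentence and applies sacrebleu's '13a' tokenizer, which splits off the
# trailing period, yielding 'about 95 species are currently accepted .'.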
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('Sources length must match predictions and references lengths.')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method='exp',
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError('Sacrebleu requires the same number of references for each prediction')
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
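
# Note on the transpose above: callers pass one list of references per
# prediction, but sacrebleu.corpus_bleu expects one stream per reference
# index. E.g. references [['r1a', 'r1b'], ['r2a', 'r2b']] become
# [['r1a', 'r2a'], ['r1b', 'r2b']].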
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Sequence(datasets.Value('string', id='sequence'), id='references'),
                }
            ),
            codebase_urls=[
                'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
                'https://github.com/cocoxu/simplification/blob/master/SARI.py',
                'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
                'https://github.com/mjpost/sacreBLEU',
            ],
            reference_urls=[
                'https://www.aclweb.org/anthology/Q16-1029.pdf',
                'https://github.com/mjpost/sacreBLEU',
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'sari': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'sacrebleu': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'exact': compute_em(predictions=predictions, references=references)})
        return result
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
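
# Example of how this mapping is used (a sketch; see _get_lr_scheduler below):
#
#   schedule_func = arg_to_scheduler['linear']
#   lr_scheduler = schedule_func(optimizer, num_warmup_steps=500, num_training_steps=10_000)
#
# 'constant' takes no warmup/step arguments and 'constant_w_warmup' only takes
# num_warmup_steps, which is why _get_lr_scheduler special-cases them.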
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                'If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is'
                f' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                'Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss'
                ' calculation or doing label smoothing.'
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding..'
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    'weight_decay': 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.')
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == 'constant':
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == 'constant_w_warmup':
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop('labels')
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'],
                attention_mask=inputs['attention_mask'],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs['max_length']:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs['max_length'])

        labels = inputs.pop('labels')
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs['max_length']:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs['max_length'])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}'
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    downstream_dict = checkpoint['Downstream']

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
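
# Example invocation (paths and model name are placeholders, shown only as an
# illustration):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model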
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
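
# Example with initial_grid above: is_safe(initial_grid, 0, 1, 1) is True
# (no 1 appears in row 0, column 1, or the top-left 3x3 box), while
# is_safe(initial_grid, 0, 1, 3) is False because grid[0][0] already holds a 3.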
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
        print('\nExample grid:\n' + '=' * 20)
        print_solution(example_grid)
        print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
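
# The solver above is plain depth-first backtracking: worst case O(9^m) for m
# empty cells, though the row/column/box checks prune most branches on
# realistic puzzles, which is why the demo grids resolve almost instantly.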
"""simple docstring"""
def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(selection_sort(unsorted))
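
# Illustrative checks:
#   selection_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
#   selection_sort([-2, -5, -45]) == [-45, -5, -2]
# Selection sort always performs O(n^2) comparisons but at most n - 1 swaps,
# which is its main advantage over bubble sort when swaps are expensive.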
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act='gelu',
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=['stage2', 'stage3', 'stage4'],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason='UperNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='UperNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='UperNet does not have a base model')
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
@unittest.skip(reason="""UperNet does not have tied weights""" )
def lowerCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
@slow
def lowerCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
def lowerCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 108
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 588
| 0
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(value) for value in self.rows[0]) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
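# A short usage sketch of the Matrix class above (values are illustrative):
if __name__ == "__main__":
    m = Matrix([[1, 2], [3, 4]])
    print(m.determinant())  # -2
    print((m * m).rows)  # [[7, 10], [15, 22]]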
| 179
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
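# Note on cost (not from the original file): table[i] stores every decomposition
# of target[:i], so while the table scan itself is O(len(target) * len(word_bank)),
# the number of decompositions -- and hence the output -- can grow exponentially,
# e.g. all_construct("aaaa", ["a", "aa"]).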
| 179
| 1
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
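# Usage sketch (illustrative overrides for the VanConfig defined above):
if __name__ == "__main__":
    config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
    print(config.model_type)  # "van"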
| 63
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
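# Downstream usage sketch, kept as comments so this package __init__ stays
# side-effect free (the checkpoint name is an assumption, not taken from this file):
#
#   from diffusers import UnCLIPPipeline
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#   image = pipe("a photo of an astronaut").images[0]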
| 63
| 1
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
A_ : List[Any] = get_tests_dir("fixtures")
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
def __UpperCamelCase ( self ):
# This test is for deprecated behavior and can be removed in v5
snake_case__ : Optional[Any] = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def __UpperCamelCase ( self ):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCamelCase ( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def __UpperCamelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def __UpperCamelCase ( self ):
snake_case__ : Dict = ViTImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case__ : Optional[int] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id="""test-image-processor""" , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
snake_case__ : List[str] = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self ):
snake_case__ : Optional[int] = ViTImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case__ : List[str] = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__SCREAMING_SNAKE_CASE , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__SCREAMING_SNAKE_CASE , use_auth_token=self._token )
snake_case__ : Any = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__SCREAMING_SNAKE_CASE , getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase ( self ):
CustomImageProcessor.register_for_auto_class()
snake_case__ : List[str] = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case__ : List[Any] = AutoImageProcessor.from_pretrained(
f"{USER}/test-dynamic-image-processor" , trust_remote_code=__SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 718
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
A_ : Optional[Any] = {"mobilebert-uncased": 512}
A_ : Optional[Any] = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
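# Usage sketch for the tokenizer above ("google/mobilebert-uncased" is the
# public checkpoint behind the pretrained map; the sentence is illustrative):
if __name__ == "__main__":
    tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    print(tokenizer("hello world")["input_ids"])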
| 419
| 0
|
class Graph:
    def __init__(self) -> None:
        # adjacency list: vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited list for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function for every unvisited vertex
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
a : Optional[Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
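# For reference, the same traversal can be written iteratively with an explicit
# stack (a sketch that assumes the Graph class above; not part of the original):
def dfs_iterative(graph: Graph, start: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start]
    while stack:
        vertex = stack.pop()
        if not visited[vertex]:
            visited[vertex] = True
            print(vertex, end=" ")
            # push neighbours in reverse so the visit order matches the recursion
            stack.extend(reversed(graph.vertex.get(vertex, [])))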
| 63
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A (unittest.TestCase ):
'''simple docstring'''
def a_ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
A__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A__ = {"""unk_token""": """<unk>"""}
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCAmelCase ) )
A__ = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
A__ = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : str , **__lowerCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : Dict , **__lowerCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : List[Any] , **__lowerCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a_ ( self : int ) -> Tuple:
"""simple docstring"""
A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = self.get_image_processor()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase )
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
A__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __lowerCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , __lowerCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __lowerCAmelCase )
self.assertIsInstance(processor_fast.image_processor , __lowerCAmelCase )
def a_ ( self : int ) -> Any:
"""simple docstring"""
A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
A__ = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def a_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCAmelCase , return_tensors="""np""" )
A__ = processor(images=__lowerCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = """lower newer"""
A__ = processor(text=__lowerCAmelCase )
A__ = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = """lower newer"""
A__ = self.prepare_image_inputs()
A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__lowerCAmelCase ):
processor()
def a_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(__lowerCAmelCase )
A__ = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Dict ) -> str:
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = CLIPProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
A__ = """lower newer"""
A__ = self.prepare_image_inputs()
A__ = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
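# For context, a hedged sketch of how this processor is used outside the test
# suite ("openai/clip-vit-base-patch32" is the standard public checkpoint and
# an assumption here, not something this file downloads):
def _clip_processor_demo():
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    return processor(text=["a photo of a cat"], return_tensors="np", padding=True)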
| 176
| 0
|
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *UpperCAmelCase_ ,**UpperCAmelCase_ ):
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
SCREAMING_SNAKE_CASE_ : str = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ):
_lowercase : Union[str, Any] = MaskGenerationPipeline(model=_a ,image_processor=_a )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def lowerCamelCase__ ( self ):
pass
@slow
@require_torch
def lowerCamelCase__ ( self ):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8871}
] ,)
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self ):
_lowercase : Dict = """facebook/sam-vit-huge"""
_lowercase : List[Any] = pipeline("""mask-generation""" ,model=_a )
_lowercase : Dict = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" ,pred_iou_thresh=1 ,points_per_batch=2_56 )
# Shortening by hashing
_lowercase : Optional[int] = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(_a ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_a ,decimals=4 ) ,[
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0053},
] ,)
| 710
|
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
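# Usage sketch for the pipelines above (a hedged example; "t5-small" is the
# stock public checkpoint usually wired to this task, not something this
# module itself pins down):
def _translation_demo():
    from transformers import pipeline

    translator = pipeline("translation_en_to_fr", model="t5-small")
    return translator("How old are you?", max_length=40)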
| 600
| 0
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
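# Example invocation (the script name follows the module's conventional naming
# and all paths are placeholders for your own files):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin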
| 438
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
def __magic_name__ ( self : Any , __lowercase : int ) -> List[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__lowercase , self.encoder[self.unk_token] )
def __magic_name__ ( self : List[str] , __lowercase : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__lowercase , self.unk_token )
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =[1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Optional[Any] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__lowercase )) + suffix_ones
return prefix_ones + ([0] * len(__lowercase )) + ([0] * len(__lowercase )) + suffix_ones
def __magic_name__ ( self : Any , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self : List[str] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Dict ={self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self : Optional[Any] ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self : Tuple , d : Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(F"{save_directory} should be a directory" )
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self : Optional[int] , src_texts : List[str] , src_lang : str = "en" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro" , **kwargs : Any , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self : str , raw_inputs : Optional[Any] , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs : List[Any] ) -> int:
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self : Any ) -> Tuple:
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self : Any ) -> Any:
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self : Optional[Any] , src_lang : str ) -> None:
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self : Union[str, Any] , tgt_lang : str ) -> None:
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self : Union[str, Any] , lang : str ) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id( self : Any , lang : str ) -> int:
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
def load_spm( path : str , sp_model_kwargs : Dict[str, Any] ):
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path : str ):
    '''simple docstring'''
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json( data : Optional[Any] , path : str ):
    '''simple docstring'''
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
| 296
| 0
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
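# Illustrative sketch (not part of the original script): the distributed-evaluation
# pattern this example demonstrates, in miniature. `Accelerator.gather` collects
# predictions from every process, but the last batch may contain duplicated samples
# that must be truncated by hand; `Accelerator.gather_for_metrics` performs the same
# truncation automatically:
#
#     predictions, references = accelerator.gather_for_metrics(
#         (predictions, batch["labels"])
#     )
#     metric.add_batch(predictions=predictions, references=references)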
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator : Dict , batch_size : Tuple = 16 ) -> Dict:
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples : Any ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples : str ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config : Dict , args : Union[str, Any] ) -> Dict:
    """simple docstring"""
    if os.environ.get('TESTING_MOCKED_DATALOADERS' , None ) == "1":
        config['num_epochs'] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather((predictions, batch['labels']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"""epoch {epoch}:""" , eval_metric )
def main( ) -> Any:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 712
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table : np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    """simple docstring"""
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            F"""{rows}x{columns} array:\n{table}"""
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
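# Usage sketch (illustrative, not part of the original file): Doolittle decomposition
# of a 2x2 matrix; `lower` has a unit diagonal and `lower @ upper` reproduces the input.
#
#     matrix = np.array([[4.0, 3.0], [6.0, 3.0]])
#     lower, upper = lower_upper_decomposition(matrix)
#     assert np.allclose(lower @ upper, matrix)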
if __name__ == "__main__":
import doctest
doctest.testmod()
| 167
| 0
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class a_ ( UpperCAmelCase_ ):
UpperCamelCase_ : Optional[Any] = (EulerDiscreteScheduler,)
UpperCamelCase_ : Union[str, Any] = 10
    def _SCREAMING_SNAKE_CASE ( self : str , **kwargs : Tuple ):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
def _SCREAMING_SNAKE_CASE ( self : Dict ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a__ , beta_end=a__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a__ )
def _SCREAMING_SNAKE_CASE ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCAmelCase__ = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCAmelCase__ = sample.to(a__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase__ = sample.to(a__ )
for t in scheduler.timesteps:
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**a__ , use_karras_sigmas=a__ )
scheduler.set_timesteps(self.num_inference_steps , device=a__ )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCAmelCase__ = sample.to(a__ )
for t in scheduler.timesteps:
lowerCAmelCase__ = scheduler.scale_model_input(a__ , a__ )
lowerCAmelCase__ = model(a__ , a__ )
lowerCAmelCase__ = scheduler.step(a__ , a__ , a__ , generator=a__ )
lowerCAmelCase__ = output.prev_sample
lowerCAmelCase__ = torch.sum(torch.abs(a__ ) )
lowerCAmelCase__ = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 644
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def __snake_case ( *a__ : List[Any] , **a__ : Optional[int] ):
pass
def hashimage( image : Image ) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask : Image ) -> Dict:
    """simple docstring"""
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_lowerCamelCase =dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def __snake_case ( self : Union[str, Any] , model : Optional[int] , tokenizer : Dict , processor : int ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def __snake_case ( self : int , mask_generator : Dict , examples : Tuple ):
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __snake_case ( self : str ):
pass
@slow
@require_torch
def __snake_case ( self : Optional[Any] ):
        image_segmenter = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
        outputs = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_967},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_909},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_879},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_834},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_716},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_612},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_599},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_552},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_532},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_516},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_499},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_483},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_464},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_408},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_335},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_326},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_262},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_999},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_986},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_984},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_873},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def __snake_case ( self : Dict ):
        model_id = '''facebook/sam-vit-huge'''
        image_segmenter = pipeline('''mask-generation''' , model=model_id )
        outputs = image_segmenter(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['''masks'''] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_444},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_210},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_167},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_132},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_053},
] , )
| 51
| 0
|
'''simple docstring'''
def solution( n : int = 4_000_000 ) -> int:
    """simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
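# Worked example (illustrative, not part of the original file): for n = 10 the even
# Fibonacci terms are 2 and 8, so solution(10) == 10; for the default limit of
# 4_000_000 (Project Euler #2) the result is 4_613_732.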
if __name__ == "__main__":
print(f'''{solution() = }''')
| 610
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase : int = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
UpperCamelCase : List[str] = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
UpperCamelCase : Tuple = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy( preds : Optional[Any] , labels : Optional[Any] ) -> Dict:
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_fa( preds : Optional[Any] , labels : Union[str, Any] ) -> Tuple:
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa( en_sentvecs : Dict , in_sentvecs : int ) -> Optional[int]:
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , 'cosine' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
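# Illustrative note (not part of the original metric): `precision_at_aa` computes
# precision@10 for cross-lingual sentence retrieval. Row i of the cosine-distance
# matrix scores the i-th English sentence against every Indian-language sentence,
# and the metric counts how often the true translation (index i) lands among the
# 10 nearest neighbours. If `in_sentvecs` equals `en_sentvecs`, every row retrieves
# itself first and the score is 1.0.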
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self : str):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute( self : Optional[Any] , predictions : List[Any] , references : int):
        """simple docstring"""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions , references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions , references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 610
| 1
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A( a , unittest.TestCase ):
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
    def get_dummy_inputs( self , seed=0 ) -> List[Any]:
        '''simple docstring'''
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = self.get_dummy_inputs()
__a = pipe(**__lowerCamelCase ).images
__a = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__a = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = self.get_dummy_inputs()
__a = pipe(**__lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# warmup pass to apply optimizations
__a = pipe(**self.get_dummy_inputs() )
__a = self.get_dummy_inputs()
__a = pipe(**__lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = self.get_dummy_inputs()
__a = pipe(**__lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = self.get_dummy_inputs()
__a = pipe(**__lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = self.get_dummy_inputs()
__a = pipe(**__lowerCamelCase ).images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__a = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
__a = ort.SessionOptions()
__a = False
return options
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = '''A fantasy landscape, trending on artstation'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCamelCase , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__a = init_image.resize((768, 512) )
__a = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__a = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__lowerCamelCase , safety_checker=__lowerCamelCase , feature_extractor=__lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
__a = '''A fantasy landscape, trending on artstation'''
__a = np.random.RandomState(0 )
__a = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCamelCase , output_type='''np''' , )
__a = output.images
__a = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__a = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 219
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase_ :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : str=3 , __lowerCamelCase : List[Any]=3_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : str=1_0 , __lowerCamelCase : Union[str, Any]=[8, 1_6, 3_2, 6_4] , __lowerCamelCase : Union[str, Any]=[1, 1, 2, 1] , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str="relu" , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : List[str]=["stage2", "stage3", "stage4"] , __lowerCamelCase : List[Any]=[2, 3, 4] , __lowerCamelCase : int=1 , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = embeddings_size
_SCREAMING_SNAKE_CASE = hidden_sizes
_SCREAMING_SNAKE_CASE = depths
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = scope
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = out_features
_SCREAMING_SNAKE_CASE = out_indices
_SCREAMING_SNAKE_CASE = num_groups
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase_ ( A , A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCamelCase_ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCAmelCase_ ( self : Dict ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
def check_hidden_states_output(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : int ):
_SCREAMING_SNAKE_CASE = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
_SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE = layer_type
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCAmelCase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
_SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.default_image_processor
_SCREAMING_SNAKE_CASE = prepare_img()
_SCREAMING_SNAKE_CASE = image_processor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(**__lowerCamelCase )
# verify the logits
_SCREAMING_SNAKE_CASE = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class lowercase_ ( A , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase_ = (BitBackbone,) if is_torch_available() else ()
lowerCamelCase_ = BitConfig
lowerCamelCase_ = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = BitModelTester(self )
| 418
| 0
|
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
A = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
A = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
A = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ), homepage='https://github.com/hendrycks/math', codebase_urls=['https://github.com/hendrycks/math'], )
    def _compute( self, predictions, references ):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions, references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 449
|
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
A = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
A = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message : str):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x : tuple):
    return x[0]
def get_frequency_order( message : str):
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True)
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score( message : str):
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
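# Illustrative note (not part of the original file): ETAOIN orders letters from most
# to least frequent in English, so the score above ranges from 0 to 12. A high score
# means the message's letter distribution looks English-like, which is how this
# helper is typically used to rank candidate keys when breaking classical ciphers.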
if __name__ == "__main__":
import doctest
doctest.testmod()
| 449
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu( vector : list[float] ):
    '''simple docstring'''
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 608
|
"""simple docstring"""
demo_graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
def bfs_shortest_path( graph : dict, start : str, goal : str ):
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
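# Complexity note (illustrative, not part of the original file): BFS expands paths
# level by level, so the first path that reaches `goal` is guaranteed to be one of
# the shortest; with the `explored` set each vertex is processed once, giving
# O(V + E) time for V vertices and E edges.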
def bfs_shortest_path_distance( graph : dict, start : str, target : str ):
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
| 608
| 1
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape
        if padding_mask is None:
            return list(audio_values)
        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
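# The slicing at the end of `_decode_audio` is easiest to see on toy data. A minimal
# numpy-only sketch; the shapes and the padding value of 0 are illustrative assumptions,
# not read from any real feature extractor.
import numpy as np

audio = np.arange(5, dtype=np.float32).reshape(1, 1, 5)  # 1 item, 1 channel, 5 samples
padding_mask = np.array([[1, 1, 1, 0, 0]])               # 0 marks padded positions
trimmed = np.asarray(audio[0])[padding_mask[0][None, :] != 0]
print(trimmed.reshape(1, -1))  # [[0. 1. 2.]] -> the padded tail is removed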
| 705
|
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a__ : Dict = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
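# A worked example of the helper above; the concrete sizes are illustrative assumptions.
# For a 480x640 image resized toward 384x384 while keeping the aspect ratio:
#   scale_height = 384/480 = 0.8, scale_width = 384/640 = 0.6; fitting height wins,
#   so both dimensions are scaled by 0.8 and snapped to multiples of 32.
h, w = get_resize_output_image_size(np.zeros((3, 480, 640)), output_size=384, keep_aspect_ratio=True, multiple=32)
assert (h, w) == (384, 512)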
class DPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, keep_aspect_ratio: bool = None, ensure_multiple_of: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 553
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
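# A short usage sketch of the template above; "article"/"highlights" are hypothetical
# dataset column names chosen for illustration.
template = Summarization(text_column="article", summary_column="highlights")
print(template.column_mapping)  # {'article': 'text', 'highlights': 'summary'}
print(template.task)            # 'summarization'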
| 328
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(F'''Format {format} is not supported.''')
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
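# The head-selection logic in `batch_decode` reduces to picking, per sample, the decoder
# head with the highest cumulative confidence. A standalone sketch with made-up scores:
scores = [0.91, 0.85, 0.88]            # char, bpe, wp confidences (illustrative)
strs = ["ticket", "ticke#", "ticket"]  # corresponding decoded strings
best = scores.index(max(scores))
print(strs[best], scores[best])        # ticket 0.91 -> the char head wins here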
| 328
| 1
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
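# A minimal instantiation sketch for the config above; the override values are arbitrary
# and simply echo the defaults.
config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.model_type)      # masked_bert
print(config.pruning_method)  # topK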
| 712
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
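# The counting above can be cross-checked by brute force for small tile budgets: a hollow
# square lamina with outer width o and hole width h (same parity, h <= o - 2) uses
# o*o - h*h tiles. A hedged verification sketch:
def laminae_count(tiles: int) -> int:
    # number of distinct hollow squares buildable from exactly `tiles` tiles
    total = 0
    outer = 3
    while outer * outer - (outer - 2) ** 2 <= tiles:
        for hole in range(outer - 2, 0, -2):
            if outer * outer - hole * hole == tiles:
                total += 1
        outer += 1
    return total

# Project Euler 173 states that 32 tiles allow exactly two laminae (from memory;
# treat this expected value as a sanity check, not as project output):
print(laminae_count(32))  # expected 2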
| 584
| 0
|
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """simple docstring"""

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer='zeros', trainable=True, name='cluster_weight')
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer='zeros', trainable=True, name='cluster_bias')
        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    proj = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}')
                    self.out_projs.append(proj)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer='zeros', trainable=True, name=f'out_projs_._{i}')
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._weight')
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer='zeros', trainable=True, name=f'out_layers_._{i}_._bias')
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
        return out
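# A minimal, untested construction sketch for the layer above; the vocabulary size,
# cutoffs, and tensor shapes are arbitrary toy numbers chosen for illustration.
softmax = TFAdaptiveSoftmaxMask(vocab_size=1000, d_embed=32, d_proj=32, cutoffs=[100, 500], div_val=1)
hidden = tf.random.uniform((4, 8, 32))                              # (batch, seq_len, d_proj)
target = tf.random.uniform((4, 8), maxval=1000, dtype=tf.int32)     # token ids
log_probs = softmax(hidden, target, return_mean=True)
print(log_probs.shape)  # (4, 8, 1000): per-token log-probabilities over the full vocab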
| 302
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Tuple = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
_UpperCAmelCase : Any = self.feature_extraction_class.from_pretrained(A_ )
_UpperCAmelCase : Union[str, Any] = feat_extract_first.to_dict()
_UpperCAmelCase : List[str] = feat_extract_second.to_dict()
_UpperCAmelCase : Optional[int] = feat_extract_first.mel_filters
_UpperCAmelCase : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : int = os.path.join(A_ , "feat_extract.json" )
feat_extract_first.to_json_file(A_ )
_UpperCAmelCase : List[str] = self.feature_extraction_class.from_json_file(A_ )
_UpperCAmelCase : List[str] = feat_extract_first.to_dict()
_UpperCAmelCase : Any = feat_extract_second.to_dict()
_UpperCAmelCase : Any = feat_extract_first.mel_filters
_UpperCAmelCase : List[str] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_UpperCAmelCase : Tuple = [np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase : str = feature_extractor(A_ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase : str = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test batched
_UpperCAmelCase : Tuple = feature_extractor(A_ , return_tensors="np" ).input_features
_UpperCAmelCase : List[str] = feature_extractor(A_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCAmelCase : Optional[int] = np.asarray(A_ )
_UpperCAmelCase : List[Any] = feature_extractor(A_ , return_tensors="np" ).input_features
_UpperCAmelCase : Union[str, Any] = feature_extractor(A_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
# Test truncation required
_UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_UpperCAmelCase : Tuple = [np.asarray(A_ ) for speech_input in speech_inputs]
_UpperCAmelCase : int = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase : Optional[int] = [np.asarray(A_ ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase : int = feature_extractor(A_ , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(A_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
import torch
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : int = np.random.rand(100 , 32 ).astype(np.floataa )
_UpperCAmelCase : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : str = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_UpperCAmelCase : Optional[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase : Union[str, Any] = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = torch.tensor(
[
0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
-0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
] )
# fmt: on
_UpperCAmelCase : Union[str, Any] = self._load_datasamples(1 )
_UpperCAmelCase : Optional[int] = WhisperFeatureExtractor()
_UpperCAmelCase : Any = feature_extractor(A_ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1e-4 ) )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Optional[Any] = self._load_datasamples(1 )[0]
_UpperCAmelCase : Union[str, Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
_UpperCAmelCase : List[str] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1e-3 ) )
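# A hedged usage sketch outside the test harness; the 16 kHz rate and the expected
# shapes follow WhisperFeatureExtractor's defaults but are illustrative here.
feature_extractor = WhisperFeatureExtractor()
speech = floats_list((1, 16000))[0]  # one second of fake audio at 16 kHz
features = feature_extractor(speech, sampling_rate=16000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000): 80 mel bins, padded to 30 s of frames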
| 300
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
def lowerCamelCase(self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A_ : int = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A_ : List[str] = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
A_ : Dict = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : Optional[Any] = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
A_ : Dict = """xa\u0303y""" + """ """ + """x\xe3y"""
A_ : Optional[Any] = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : Tuple = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on unicode of space type
A_ : Optional[int] = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
A_ : Any = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : str = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Test that the tokenization is identical on unicode of line break type
A_ : Union[str, Any] = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
A_ : int = tokenizer_s.tokenize(lowerCAmelCase_ )
A_ : Optional[Any] = tokenizer_r.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase(self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A_ : Dict = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A_ : Dict = f"""{text_of_1_token} {text_of_1_token}"""
A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , )
A_ : Any = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
A_ : Any = f""" {text}"""
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase_ , use_fast=lowerCAmelCase_ , )
A_ : str = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ) + 1, 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
def lowerCamelCase(self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase_ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def lowerCamelCase(self ):
super().test_tokenization_python_rust_equals()
def lowerCamelCase(self ):
# CLIP always lower cases letters
pass
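# The toy vocabulary in setUp makes the merge behaviour easy to trace by hand.
# With merges ["l o", "lo w</w>", "e r</w>"], "lower" tokenizes as:
#   l o w e r</w>  ->  lo w e r</w>  ->  lo w er</w>
# ("lo w</w>" does not fire because that "w" is not word-final), so
# tokenize("lower newer") yields ["lo", "w", "er</w>", "n", "e", "w", "er</w>"],
# matching the expected list in test_full_tokenizer above.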
| 480
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
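# A hedged sketch of the factory under test, mirroring the assertions above.
act = get_activation("gelu")
x = torch.tensor([-100.0, 0.0, 20.0])
print(act(x))  # ~[0., 0., 20.]: GELU is near-identity for large positive inputs, ~0 for large negative ones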
| 480
| 1
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """simple docstring"""

    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs)
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts), f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1])))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
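# Standalone sketch of the span-scoring idea in `_get_best_spans`, with made-up logits.
start_logits = [0.1, 2.0, 0.3]
end_logits = [0.2, 0.1, 1.5]
scores = []
for s, s_score in enumerate(start_logits):
    for length, e_score in enumerate(end_logits[s : s + 2]):  # max_answer_length = 2
        scores.append(((s, s + length), s_score + e_score))
best = sorted(scores, key=lambda x: x[1], reverse=True)[0]
print(best)  # ((1, 2), 3.5): the span from token 1 to token 2 scores highest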
| 53
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
# Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False
# Data collator
# This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
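# A hedged example invocation of this training script (the checkpoint, file
# paths and output directory below are illustrative placeholders, not values
# taken from the original):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file path/to/train.txt \
#       --train_ref_file path/to/train_ref.txt \
#       --do_train \
#       --do_eval \
#       --output_dir /tmp/test-mlm-wwm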
| 53
| 1
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    # Deal with dynamic shape in tensorflow cleanly.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny epsilon keeps the op numerically stable under XLA compilation.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
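# A hedged, illustrative sketch of the helpers above (kept as comments since
# this module uses relative imports and is not meant to run standalone; the
# example tensors are made up):
#
#   x = tf.ones((2, 3, 4))
#   shape_list(x)                              # [2, 3, 4]
#   shape_list(flatten(x, start_dim=1))        # [2, 12]
#   expand_1d(tf.constant([1.0, 2.0])).shape   # (2, 1)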
| 710
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
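    # A short, self-contained demo of the class above (hedged: the example
    # values are illustrative and not part of the original module):
    matrix = Matrix([[1, 2], [3, 4]])
    print(matrix.determinant())  # -2
    print(matrix * matrix.identity() == matrix)  # True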
| 277
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
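# A hedged sketch of what the character-level vocabulary above implies (ids
# follow from the vocab dict built in setUp, where "t" -> 31, "e" -> 16,
# "s" -> 30, "r" -> 29; not runnable from this test module directly):
#
#   tokenizer = MgpstrTokenizer.from_pretrained(tmpdirname)
#   tokenizer.tokenize("tester")                 # ['t', 'e', 's', 't', 'e', 'r']
#   tokenizer.convert_tokens_to_ids(["t", "e"])  # [31, 16]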
| 473
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
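# Hedged usage note: with this conftest active, per-test reports can be written
# via the shared option, e.g. (the report id "tests" is an illustrative choice):
#
#   pytest --make-reports=tests tests/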
| 536
| 0
|
'''simple docstring'''
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size += abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
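# Hedged example invocation (the image path and parameter values below are
# illustrative, not taken from the original):
#
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 7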
| 174
|
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
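# A hedged sketch of the byte-level behaviour exercised above (assumes the
# public "deepmind/language-perceiver" checkpoint; ids are UTF-8 bytes shifted
# by the 6 special tokens, e.g. "e" = 101 + 6 = 107):
#
#   tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   tokenizer("hi")["input_ids"]              # [4, 110, 111, 5] -> [CLS] h i [SEP]
#   tokenizer.decode(tokenizer.encode("hi"))  # "[CLS]hi[SEP]"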
| 174
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
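# Hedged usage note: with the lazy module set up above, the heavyweight
# submodules are only imported on first attribute access, e.g.:
#
#   from transformers import PLBartTokenizer, PLBartForConditionalGeneration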
| 300
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
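    # Quick hedged sanity check (illustrative, not part of the original
    # module): choosing 2 items out of 4 must yield C(4, 2) = 6 combinations.
    assert len(total_list) == 6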
| 300
| 1
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total_number = max_face_number * dice_number

    totals_frequencies = [0] * (max_total_number + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
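    # A hedged Monte Carlo cross-check of the exact answer above (sample size
    # and seed are illustrative choices, not part of the original module):
    from random import randint, seed

    seed(0)
    trials = 100_000
    wins = sum(
        sum(randint(1, 4) for _ in range(9)) > sum(randint(1, 6) for _ in range(6))
        for _ in range(trials)
    )
    print(f"monte carlo estimate: {wins / trials:.4f}")  # should land near 0.5731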
| 234
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
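    # A hedged spot-check (illustrative, not part of the original module): both
    # implementations must agree on whether a valid triplet exists for the
    # shared random dataset.
    t1, t2 = triplet_sum1(*dataset), triplet_sum2(*dataset)
    assert (sum(t1) == dataset[1]) == (sum(t2) == dataset[1])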
| 234
| 1
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_UpperCamelCase : str = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Any = False
def __a ( self ):
_lowercase : Optional[int] = MobileViTVaModelTester(self )
_lowercase : Optional[int] = MobileViTVaConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def __a ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def __a ( self ):
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def __a ( self ):
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def __a ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def __a ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __a ( self ):
pass
def __a ( self ):
_lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Any = model_class(_lowerCAmelCase )
_lowercase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[str] = [*signature.parameters.keys()]
_lowercase : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def __a ( self ):
_lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def test_hidden_states_output(self):
    def check_hidden_states_output(inputs_dict, config, model_class):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_stages = 5
        self.assertEqual(len(hidden_states), expected_num_stages)
        # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
        # with the width and height being successively divided by 2.
        divisor = 2
        for i in range(len(hidden_states)):
            self.assertListEqual(
                list(hidden_states[i].shape[-2:]),
                [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
            )
            divisor *= 2
        self.assertEqual(self.model_tester.output_stride, divisor // 2)

    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        check_hidden_states_output(inputs_dict, config, model_class)
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        check_hidden_states_output(inputs_dict, config, model_class)
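# --- Illustrative sketch (added, not part of the original test): the shape
# arithmetic behind the check above. Each of the five stages halves height and
# width, so stage i has spatial size image_size // 2**(i + 1) and the output
# stride equals the final divisor // 2. The values are assumed examples, not
# read from any real config.
@staticmethod
def _expected_feature_map_sizes(image_size=64, num_stages=5):
    sizes, divisor = [], 2
    for _ in range(num_stages):
        sizes.append(image_size // divisor)
        divisor *= 2
    return sizes, divisor // 2  # ([32, 16, 8, 4, 2], 32) for image_size=64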
def test_for_image_classification(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

def test_for_semantic_segmentation(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
    for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = MobileViTVaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
def default_image_processor(self):
    return (
        MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
        if is_vision_available()
        else None
    )
@slow
def test_inference_image_classification_head(self):
    model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
        torch_device
    )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs)
    # verify the logits
    expected_shape = torch.Size((1, 1000))
    self.assertEqual(outputs.logits.shape, expected_shape)
    expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
    self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
def test_inference_semantic_segmentation(self):
    model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    model = model.to(torch_device)
    image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # verify the logits
    expected_shape = torch.Size((1, 21, 32, 32))
    self.assertEqual(logits.shape, expected_shape)
    expected_slice = torch.tensor(
        [
            [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
            [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
            [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
        ],
        device=torch_device,
    )
    self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
def test_post_processing_semantic_segmentation(self):
    model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    model = model.to(torch_device)
    image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
    image = prepare_img()
    inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
    # forward pass
    with torch.no_grad():
        outputs = model(**inputs)
    outputs.logits = outputs.logits.detach().cpu()
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
    expected_shape = torch.Size((50, 60))
    self.assertEqual(segmentation[0].shape, expected_shape)
    segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
    expected_shape = torch.Size((32, 32))
    self.assertEqual(segmentation[0].shape, expected_shape)
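# --- Illustrative sketch (added): with no `target_sizes`, semantic-segmentation
# post-processing essentially reduces to an argmax over the class dimension of
# the (batch, num_classes, height, width) logits. A simplified stand-in, not
# the image processor's actual implementation.
def naive_semantic_segmentation(logits):
    return [logits[i].argmax(dim=0) for i in range(logits.shape[0])]  # each (H, W)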
| 66
|
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` into values less than, equal to and
    greater than the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the index-th smallest element of `items` (0-indexed), or None
    if the index is out of range."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
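# --- Usage sketch (added): quick_select returns the k-th smallest element
# (0-indexed) in expected linear time, so index 2 of a 5-element list is its
# median. Out-of-range indices yield None, matching the guard above.
if __name__ == "__main__":
    assert quick_select([5, 1, 4, 2, 3], 2) == 3
    assert quick_select([7, 7, 7], 1) == 7
    assert quick_select([1], 5) is None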
| 240
| 0
|
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit via a prime sieve; this
    equals the number of reduced proper fractions with denominator <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f"{solution() = }")
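    # --- Worked check (added): the function returns sum(phi(n) for n in
    # 2..limit), i.e. the number of reduced proper fractions with denominator
    # at most `limit` (Project Euler 72). For limit = 8: 1+2+2+4+2+6+4 = 21.
    assert solution(8) == 21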
| 714
|
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False
def setUp(self):
    super().setUp()
    vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
    ).split(" ")
    vocab_dict = dict(zip(vocab, range(len(vocab))))
    self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab_dict) + "\n")
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
    toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
    toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
    if max_length is not None and len(toks) > max_length:
        toks = toks[:max_length]
    if min_length is not None and len(toks) < min_length and len(toks) > 0:
        while len(toks) < min_length:
            toks = toks + toks
    # toks_str = [t[1] for t in toks]
    toks_ids = [t[0] for t in toks]
    # Ensure consistency
    output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
    if " " not in output_txt and len(toks_ids) > 1:
        output_txt = (
            tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
            + " "
            + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
        )
    if with_prefix_space:
        output_txt = " " + output_txt
    output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
    return output_txt, output_ids
def get_tokenizer(self, **kwargs):
    kwargs.update(self.special_tokens_map)
    return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def test_tokenizer_add_new_tokens(self):
    tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    # check adding a single token
    tokenizer.add_tokens("xxx")
    token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
    self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
    tokenizer.add_tokens(["aaa", "bbb", "ccc"])
    token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
    self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
    token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
    self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
def test_phonemize(self):
    tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    input_text = "Hello how are you"
    phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
    self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")
def test_encode(self):
    tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    input_text = "Hello how are you"
    phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
    self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)
def test_encode_decode(self):
    tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    input_text = "Hello how are you"
    phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
    phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
    self.assertEqual(phonemes, phonemes_enc_dec)
def test_decode(self):
    tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    sample_ids = [
        [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
        [24, 22, 5, 24, 22, 5, 77],
    ]
    tokens = tokenizer.decode(sample_ids[0])
    batch_tokens = tokenizer.batch_decode(sample_ids)
    self.assertEqual(tokens, batch_tokens[0])
    self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
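# --- Illustrative sketch (added): conceptually, the decoding checked above
# collapses consecutive repeated ids and drops the pad token; the pad between
# the two 15s in sample_ids[0] is why "ɾ ɾ" survives. A simplified stand-in,
# not the tokenizer's actual implementation.
@staticmethod
def _naive_ctc_collapse(ids, pad_token_id):
    out, prev = [], None
    for i in ids:
        if i != prev and i != pad_token_id:
            out.append(i)
        prev = i
    return out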
def UpperCAmelCase ( self : Dict ) -> Any:
'''simple docstring'''
a__ : str = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : List[Any] = "Hello how are you"
a__ : Union[str, Any] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
self.assertEqual(a_ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : Any = "Hello how are you"
a__ : Optional[int] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(a_ ).input_ids , tokenizer(a_ , do_phonemize=a_ ).input_ids )
def test_decode_with_word_del(self):
    tokenizer = self.tokenizer_class.from_pretrained(
        "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|")
    tokenizer.add_tokens("|")
    # fmt: off
    sample_ids = [
        [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
        [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
    ]
    # fmt: on
    # decode with word_del_token filter
    tokens = tokenizer.decode(sample_ids[0])
    batch_tokens = tokenizer.batch_decode(sample_ids)
    self.assertEqual(tokens, batch_tokens[0])
    self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    # decode with no word_del_token filter
    tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
    batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
    self.assertEqual(tokens, batch_tokens[0])
    self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
a__ : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : Optional[Any] = "Hello how are you"
a__ : Union[str, Any] = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
a__ : str = tokenizer.decode(tokenizer(a_ ).input_ids , filter_word_delimiter_token=a_ )
self.assertEqual(a_ , a_ )
def UpperCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
a__ : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
a__ : Optional[int] = "Hello how are you"
a__ : str = tokenizer.phonemize(a_ , phonemizer_lang="en-us" )
a__ : Dict = tokenizer.decode(tokenizer(a_ ).input_ids , filter_word_delimiter_token=a_ )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , a_ )
def test_change_phonemizer_lang(self):
    tokenizer = self.tokenizer_class.from_pretrained(
        "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None)
    input_text = "Hello how are you"
    input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
    input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
    self.assertNotEqual(input_ids_en, input_ids_fr)
    text_en = tokenizer.decode(input_ids_en)
    text_fr = tokenizer.decode(input_ids_fr)
    self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
    self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")
def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
a__ : Tuple = "Hello how Are you"
a__ : List[Any] = "hello how are you"
a__ : Optional[Any] = tokenizer(a_ ).input_ids
a__ : int = tokenizer(a_ ).input_ids
self.assertEqual(a_ , a_ )
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : str = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
a__ : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
a__ : Any = tokenizer.batch_decode(a_ )
self.assertEqual(a_ , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def get_from_offsets(offsets, key):
    retrieved_list = [d[key] for d in offsets]
    return retrieved_list
def test_offsets(self):
    tokenizer = self.get_tokenizer(word_delimiter_token="|")
    tokenizer.add_tokens("|")
    # fmt: off
    # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
    sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
    # fmt: on
    outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
    # check Wav2Vec2CTCTokenizerOutput keys for char
    self.assertEqual(len(outputs.keys()), 2)
    self.assertTrue("text" in outputs)
    self.assertTrue("char_offsets" in outputs)
    self.assertTrue(isinstance(outputs, WavaVecaPhonemeCTCTokenizerOutput))
    # check that order of chars is correct and identical for both outputs
    self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
    self.assertListEqual(
        self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"])
    # check that offsets are actually correct for char
    # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
    # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
    self.assertListEqual(
        self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16])
    self.assertListEqual(
        self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17])
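# --- Illustrative sketch (added): how offsets like those asserted above arise
# — each run of identical ids yields one entry spanning its first and last
# frame index; entries for the pad token are filtered out afterwards. A
# simplified stand-in for the tokenizer's offset computation.
@staticmethod
def _naive_char_offsets(ids):
    offsets, start = [], 0
    for pos in range(1, len(ids)):
        if ids[pos] != ids[pos - 1]:
            offsets.append({"id": ids[pos - 1], "start_offset": start, "end_offset": pos})
            start = pos
    offsets.append({"id": ids[-1], "start_offset": start, "end_offset": len(ids)})
    return offsets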
def test_offsets_batch(self):
    tokenizer = self.get_tokenizer(word_delimiter_token="|")

    def check_list_tuples_equal(outputs_batch, outputs_list):
        self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
        self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))
        # transform list to ModelOutput
        outputs_batch_2 = WavaVecaPhonemeCTCTokenizerOutput(
            {k: [d[k] for d in outputs_list] for k in outputs_list[0]})
        self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

        def recursive_check(list_or_dict_1, list_or_dict_2):
            if isinstance(list_or_dict_1, list):
                [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
            self.assertEqual(list_or_dict_1, list_or_dict_2)

        if "char_offsets" in outputs_batch:
            recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

    # fmt: off
    sample_ids = [
        [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
        [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
    ]
    # fmt: on
    # We assume that `decode` works as expected. All we will check now is
    # the output type is correct and the output is identical to `decode`
    # char
    outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
    outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
    check_list_tuples_equal(outputs_char_batch, outputs_char)
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def UpperCAmelCase ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self : List[str] ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
a__ : Optional[int] = tokenizer.vocab_size
a__ : List[Any] = len(a_ )
self.assertNotEqual(a_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
a__ : Optional[int] = ["aaaaa bbbbbb", "cccccccccdddddddd"]
a__ : List[str] = tokenizer.add_tokens(a_ )
a__ : Tuple = tokenizer.vocab_size
a__ : Dict = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size + len(a_ ) )
a__ : List[str] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
a__ : Tuple = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
a__ : List[Any] = tokenizer.add_special_tokens(a_ )
a__ : List[str] = tokenizer.vocab_size
a__ : List[Any] = len(a_ )
self.assertNotEqual(a_ , 0 )
self.assertEqual(a_ , a_ )
self.assertEqual(a_ , len(a_ ) )
self.assertEqual(a_ , all_size_a + len(a_ ) )
a__ : List[str] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def UpperCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[Any] = self.get_tokenizers(fast=a_ , do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
a__ : Union[str, Any] = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
a__ : List[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(output["text"] , a_ )
| 251
| 0
|
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an n-th power
    (Project Euler problem 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
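    # --- Worked check (added): the count covers n-digit integers that are
    # also n-th powers (Project Euler 63); e.g. 7**5 = 16807 has five digits,
    # and the nine one-digit first powers 1..9 alone contribute nine hits.
    assert len(str(7**5)) == 5
    assert solution() == 49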
| 30
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCamelCase = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    """Image processor that rescales pixel values and pads images to a
    multiple of `pad_size` (the class name is inferred from the symmetric
    pad-to-multiple behaviour)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
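# --- Illustrative sketch (added): the padding rule used by `pad` above grows
# each side up to the next multiple of `size`; note that exact multiples still
# gain one full block because of the `// size + 1`. The values below are
# assumed examples.
def pad_amounts(old_height: int, old_width: int, size: int = 8):
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width


if __name__ == "__main__":
    assert pad_amounts(30, 33, 8) == (2, 7)  # 30 -> 32, 33 -> 40
    assert pad_amounts(32, 32, 8) == (8, 8)  # exact multiples still gain a block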
| 474
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
@require_beam
def test_download_and_prepare(self):
    expected_num_examples = len(get_test_dummy_examples())
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
        builder.download_and_prepare()
        self.assertTrue(
            os.path.exists(
                os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
            )
        )
        self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
        dset = builder.as_dataset()
        self.assertEqual(dset["train"].num_rows, expected_num_examples)
        self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
        self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
        self.assertDictEqual(
            dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
        )
        self.assertTrue(
            os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
        )
        del dset
@require_beam
def test_download_and_prepare_sharded(self):
    import apache_beam as beam

    original_write_parquet = beam.io.parquetio.WriteToParquet
    expected_num_examples = len(get_test_dummy_examples())
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
        with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
            write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
            builder.download_and_prepare()
        self.assertTrue(
            os.path.exists(
                os.path.join(
                    tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                )
            )
        )
        self.assertTrue(
            os.path.exists(
                os.path.join(
                    tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                )
            )
        )
        self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
        dset = builder.as_dataset()
        self.assertEqual(dset["train"].num_rows, expected_num_examples)
        self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
        # Order is not preserved when sharding, so we just check that all the elements are there
        self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
        self.assertTrue(
            os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
        )
        del dset
@require_beam
def test_no_beam_options(self):
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
        self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
@require_beam
def test_nested_features(self):
    expected_num_examples = len(get_test_nested_examples())
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
        builder.download_and_prepare()
        self.assertTrue(
            os.path.exists(
                os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
            )
        )
        self.assertDictEqual(
            builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
        )
        dset = builder.as_dataset()
        self.assertEqual(dset["train"].num_rows, expected_num_examples)
        self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
        self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
        self.assertDictEqual(
            dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
        )
        self.assertTrue(
            os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
        )
        del dset
| 541
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
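# --- Usage sketch (added): `make_batched` normalises the three accepted input
# shapes into a list of videos, each a list of frames. The PIL frame below is
# an assumed stand-in; numpy arrays behave the same way. Never called at
# import time.
def _demo_make_batched():
    frame = PIL.Image.new("RGB", (4, 4))
    assert make_batched(frame) == [[frame]]  # single image -> one 1-frame video
    assert make_batched([frame, frame]) == [[frame, frame]]  # one video
    assert make_batched([[frame], [frame]]) == [[frame], [frame]]  # already batched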
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 541
| 1
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 16
SCREAMING_SNAKE_CASE__ : List[Any] = 32
def bamb(x: int) -> int:
    """Convert bytes to megabytes (2**20 bytes)."""
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
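# --- Usage sketch (added): the tracker above is meant to bracket a training
# epoch; afterwards `used` and `peaked` hold the MB deltas. It needs a CUDA
# device, so this helper is illustrative only and is never called here.
def _demo_trace_memory():
    with TorchTracemalloc() as tracemalloc:
        _ = torch.ones(1024, 1024, device="cuda")
    print(f"used={tracemalloc.used}MB peak={tracemalloc.peaked}MB")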
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 298
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
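# --- Illustrative stand-in (added): `get_duration` comes from a local `utils`
# module that is not shown here. A minimal decorator consistent with how it is
# used above (time the call, return elapsed seconds) could look like this; it
# is an assumption, not the benchmark's actual helper.
import functools
import time


def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start
    return wrapper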
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 398
| 0
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive file lock so output from concurrent processes
    does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()
    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
| 74
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
def __init__(self, parent):
    self.parent = parent
    self.batch_size = 13
    self.seq_length = 7
    self.is_training = True
    self.use_input_lengths = True
    self.use_token_type_ids = True
    self.use_labels = True
    self.gelu_activation = True
    self.sinusoidal_embeddings = False
    self.asm = False
    self.causal = False
    self.n_langs = 2
    self.vocab_size = 99
    self.n_special = 0
    self.hidden_size = 32
    self.num_hidden_layers = 2
    self.num_attention_heads = 4
    self.hidden_dropout_prob = 0.1
    self.attention_probs_dropout_prob = 0.1
    self.max_position_embeddings = 512
    self.type_vocab_size = 16
    self.type_sequence_label_size = 2
    self.initializer_range = 0.02
    self.num_labels = 3
    self.num_choices = 4
    self.summary_type = "last"
    self.use_proj = True
    self.scope = None
    self.bos_token_id = 0
def prepare_config_and_inputs(self):
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
    input_lengths = None
    if self.use_input_lengths:
        input_lengths = (
            ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
        )  # small variation of seq_length
    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
    sequence_labels = None
    token_labels = None
    is_impossible_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
    config = FlaubertConfig(
        vocab_size=self.vocab_size,
        n_special=self.n_special,
        emb_dim=self.hidden_size,
        n_layers=self.num_hidden_layers,
        n_heads=self.num_attention_heads,
        dropout=self.hidden_dropout_prob,
        attention_dropout=self.attention_probs_dropout_prob,
        gelu_activation=self.gelu_activation,
        sinusoidal_embeddings=self.sinusoidal_embeddings,
        asm=self.asm,
        causal=self.causal,
        n_langs=self.n_langs,
        max_position_embeddings=self.max_position_embeddings,
        initializer_range=self.initializer_range,
        summary_type=self.summary_type,
        use_proj=self.use_proj,
        bos_token_id=self.bos_token_id,
    )
    return (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    )
def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
    model = TFFlaubertModel(config=config)
    inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
    result = model(inputs)
    inputs = [input_ids, input_mask]
    result = model(inputs)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertWithLMHeadModel(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForSequenceClassification(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFFlaubertForTokenClassification(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFFlaubertForMultipleChoice(config=A )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
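# Illustrative aside (not part of the test suite): the multiple-choice check above turns
# each (batch, seq_len) tensor into (batch, num_choices, seq_len) by inserting a choice
# axis and tiling along it. A standalone sketch with hypothetical sizes:
def _tile_for_multiple_choice_demo():
    batch_size, seq_length, num_choices = 2, 5, 4
    input_ids = tf.zeros((batch_size, seq_length), dtype=tf.int32)
    tiled = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
    assert tiled.shape == (batch_size, num_choices, seq_length)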
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamic import to avoid a circular dependency
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
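# Usage sketch (illustrative, not part of the module): RagTokenizer routes encoding
# through the question-encoder tokenizer and decoding through the generator tokenizer.
# "facebook/rag-token-base" is one published RAG checkpoint.
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     inputs = tokenizer("who wrote madame bovary", return_tensors="pt")
#     text = tokenizer.batch_decode(inputs["input_ids"])  # decodes via the generator side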
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
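# For reference (illustrative, not part of the metric): the wrapper above delegates to
# Google's `rouge_score` package, which can also be called directly when the datasets
# plumbing is not needed.
#
#     from rouge_score import rouge_scorer
#
#     scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
#     score = scorer.score("the cat sat on the mat", "the cat was on the mat")
#     print(score["rouge1"].fmeasure)  # each entry is a (precision, recall, fmeasure) tuple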
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9,
        is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8,
        dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0,
        decoder_start_token_id=0, scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return T5Config(vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : Optional[Any] = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
UpperCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=UpperCAmelCase__ , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase__ )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
lowerCamelCase__ : int = config_and_inputs[0]
lowerCamelCase__ : Union[str, Any] = UMTaForConditionalGeneration(UpperCAmelCase__ ).eval()
model.to(UpperCAmelCase__ )
lowerCamelCase__ : str = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ ),
}
for attn_name, (name, mask) in zip(UpperCAmelCase__ , head_masking.items() ):
lowerCamelCase__ : int = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCamelCase__ : List[str] = torch.ones(
config.num_decoder_layers , config.num_heads , device=UpperCAmelCase__ )
lowerCamelCase__ : Union[str, Any] = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase__ , return_dict_in_generate=UpperCAmelCase__ , **UpperCAmelCase__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCamelCase__ : List[str] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
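# Illustrative aside (not part of the test suite): the decoder-cache check in
# UMT5ModelTester.create_and_check_decoder_model_past pins down the standard
# incremental-decoding contract. In isolation it looks like this for any HF
# decoder `model`, prompt `input_ids`, and sampled `next_token`:
#
#     _, past = model(input_ids, use_cache=True).to_tuple()
#     out_full = model(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state[:, -1]
#     out_step = model(next_token, past_key_values=past).last_hidden_state[:, -1]
#     assert torch.allclose(out_full, out_step, atol=1e-3)  # cache must not change results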
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        # fmt: off
        expected_output_ids = [481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481]  # the president is a very good man. " \n " i'm sure he is, " said the
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
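# Illustrative aside (not part of the test suite): `do_sample=False` makes `generate`
# greedy and therefore deterministic, which is what lets the test above assert exact
# token ids. A minimal usage sketch with the same checkpoint:
#
#     from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#     tok = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     lm = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     ids = tok("the president is", return_tensors="pt").input_ids
#     out = lm.generate(ids, do_sample=False)  # identical ids on every run
#     print(tok.decode(out[0]))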
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase_ : Optional[Any] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
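# Illustrative aside (not part of the test suite): the save/load equivalence check above
# hinges on rewinding the torch RNG, since diffusion sampling is otherwise stochastic.
#
#     generator = torch.manual_seed(0)
#     first = pipe(prompt, generator=generator, num_inference_steps=2, output_type="numpy").images
#     generator = generator.manual_seed(0)  # rewind the RNG to the same state
#     second = pipe(prompt, generator=generator, num_inference_steps=2, output_type="numpy").images
#     assert np.abs(first - second).sum() < 1e-5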
'''simple docstring'''
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
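# An equivalent, arguably clearer formulation (illustrative addition): multiply each
# 13-digit window directly with math.prod instead of reducing over strings.
from math import prod


def solution_prod(n: str = N) -> int:
    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))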
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for image-to-image generation using Kandinsky 2.2.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
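# Illustrative aside (not part of the pipeline): the guidance step inside the denoising
# loop is plain classifier-free guidance. With the unconditional and conditioned
# predictions stacked on the batch axis, the recombination is:
#
#     uncond, cond = noise_pred.chunk(2)
#     guided = uncond + guidance_scale * (cond - uncond)
#
# guidance_scale > 1 pushes the sample toward the image-embedding condition, while
# guidance_scale == 1 reduces to the conditioned prediction alone.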
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def __magic_name__( _A ):
'''simple docstring'''
UpperCamelCase__ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCamelCase__ = bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def __magic_name__( _A ):
'''simple docstring'''
def remove_articles(_A ):
return ARTICLES_REGEX.sub(""" """ , _A )
def white_space_fix(_A ):
return " ".join(text.split() )
def remove_punc(_A ):
UpperCamelCase__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) )
def __magic_name__( _A ):
'''simple docstring'''
if not s:
return []
return normalize_answer(_A ).split()
def __magic_name__( _A , _A ):
'''simple docstring'''
return int(normalize_answer(_A ) == normalize_answer(_A ) )
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = get_tokens(_A )
UpperCamelCase__ = get_tokens(_A )
UpperCamelCase__ = collections.Counter(_A ) & collections.Counter(_A )
UpperCamelCase__ = sum(common.values() )
if len(_A ) == 0 or len(_A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
UpperCamelCase__ = 1.0 * num_same / len(_A )
UpperCamelCase__ = 1.0 * num_same / len(_A )
UpperCamelCase__ = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__( _A , _A ):
'''simple docstring'''
UpperCamelCase__ = {}
UpperCamelCase__ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
UpperCamelCase__ = qa["""id"""]
UpperCamelCase__ = [t for t in qa["""answers"""]["""text"""] if normalize_answer(_A )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
UpperCamelCase__ = [""""""]
if qid not in preds:
print(f"Missing prediction for {qid}" )
continue
UpperCamelCase__ = preds[qid]
# Take max over all gold answers
UpperCamelCase__ = max(compute_exact(_A , _A ) for a in gold_answers )
UpperCamelCase__ = max(compute_fa(_A , _A ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = {}
for qid, s in scores.items():
UpperCamelCase__ = na_probs[qid] > na_prob_thresh
if pred_na:
UpperCamelCase__ = float(not qid_to_has_ans[qid] )
else:
UpperCamelCase__ = s
return new_scores
def __magic_name__( _A , _A , _A=None ):
'''simple docstring'''
if not qid_list:
UpperCamelCase__ = len(_A )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
UpperCamelCase__ = len(_A )
return collections.OrderedDict(
[
("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def __magic_name__( _A , _A , _A ):
'''simple docstring'''
for k in new_eval:
UpperCamelCase__ = new_eval[k]
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
plt.step(_A , _A , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(_A , _A , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(_A )
plt.savefig(_A )
plt.clf()
def __magic_name__( _A , _A , _A , _A , _A=None , _A=None ):
'''simple docstring'''
UpperCamelCase__ = sorted(_A , key=lambda _A : na_probs[k] )
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1.0
UpperCamelCase__ = 0.0
UpperCamelCase__ = [1.0]
UpperCamelCase__ = [0.0]
UpperCamelCase__ = 0.0
for i, qid in enumerate(_A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
UpperCamelCase__ = true_pos / float(i + 1 )
UpperCamelCase__ = true_pos / float(_A )
if i == len(_A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_A )
recalls.append(_A )
if out_image:
plot_pr_curve(_A , _A , _A , _A )
return {"ap": 1_0_0.0 * avg_prec}
def __magic_name__( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
if out_image_dir and not os.path.exists(_A ):
os.makedirs(_A )
UpperCamelCase__ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
UpperCamelCase__ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
UpperCamelCase__ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
UpperCamelCase__ = {k: float(_A ) for k, v in qid_to_has_ans.items()}
UpperCamelCase__ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(_A , _A , """pr_exact""" )
merge_eval(_A , _A , """pr_f1""" )
merge_eval(_A , _A , """pr_oracle""" )
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
if not qid_list:
return
UpperCamelCase__ = [na_probs[k] for k in qid_list]
UpperCamelCase__ = np.ones_like(_A ) / float(len(_A ) )
plt.hist(_A , weights=_A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(_A , f"na_prob_hist_{name}.png" ) )
plt.clf()
def __magic_name__( _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
UpperCamelCase__ = num_no_ans
UpperCamelCase__ = cur_score
UpperCamelCase__ = 0.0
UpperCamelCase__ = sorted(_A , key=lambda _A : na_probs[k] )
for i, qid in enumerate(_A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
UpperCamelCase__ = scores[qid]
else:
if preds[qid]:
UpperCamelCase__ = -1
else:
UpperCamelCase__ = 0
cur_score += diff
if cur_score > best_score:
UpperCamelCase__ = cur_score
UpperCamelCase__ = na_probs[qid]
return 1_0_0.0 * best_score / len(_A ), best_thresh
def __magic_name__( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(_A , _A , _A , _A )
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(_A , _A , _A , _A )
UpperCamelCase__ = best_exact
UpperCamelCase__ = exact_thresh
UpperCamelCase__ = best_fa
UpperCamelCase__ = fa_thresh
def __magic_name__( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
UpperCamelCase__ = json.load(_A )
UpperCamelCase__ = dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
UpperCamelCase__ = json.load(_A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCamelCase__ = json.load(_A )
else:
UpperCamelCase__ = {k: 0.0 for k in preds}
UpperCamelCase__ = make_qid_to_has_ans(_A ) # maps qid to True/False
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if v]
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCamelCase__ , UpperCamelCase__ = get_raw_scores(_A , _A )
UpperCamelCase__ = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh )
UpperCamelCase__ = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh )
UpperCamelCase__ = make_eval_dict(_A , _A )
if has_ans_qids:
UpperCamelCase__ = make_eval_dict(_A , _A , qid_list=_A )
merge_eval(_A , _A , """HasAns""" )
if no_ans_qids:
UpperCamelCase__ = make_eval_dict(_A , _A , qid_list=_A )
merge_eval(_A , _A , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(_A , _A , _A , _A , _A , _A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_A , _A , _A , _A , _A , OPTS.out_image_dir )
histogram_na_prob(_A , _A , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(_A , _A , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(_A , _A )
else:
print(json.dumps(_A , indent=2 ) )
if __name__ == "__main__":
lowerCamelCase_ : str = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
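# --- Illustrative sanity checks (added; not part of the official script) ---
# The scoring helpers above can be exercised directly; values verified by hand:
#
#   normalize_answer("The Cat's hat!")        # -> "cats hat" (articles/punctuation stripped)
#   compute_exact("a cat", "A Cat")           # -> 1
#   compute_f1("the cat sat", "cat sat down") # -> 0.8 (precision 2/3, recall 1)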
| 265
| 0
|
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
# NOTE: this implementation uses the TensorFlow 1.x graph-mode API
# (tf.placeholder / tf.Session); on TensorFlow 2.x, import it via
# `import tensorflow.compat.v1 as tf` and call `tf.disable_eager_execution()`.
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.sub was renamed to tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        # tf.initialize_all_variables() is deprecated; use the modern initializer
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
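# --- Illustrative usage (added) ---
# A minimal smoke test, assuming TensorFlow 1.x semantics as noted above:
#
#   import numpy as np
#   data = np.random.rand(50, 2).astype("float64")
#   centroids, assignments = TFKMeansCluster(data, noofclusters=3)
#   print(len(centroids), len(assignments))  # -> 3 50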
| 4
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
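# --- Illustrative sketch (added) ---
# A simplified, assumed version of the alignment rule the first test exercises:
# with both arguments None it falls back to the last stage, and each side can be
# derived from the other (the real implementation also validates its inputs).
def _aligned_sketch(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[idx] for idx in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(feat) for feat in out_features]
    return list(out_features), list(out_indices)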
| 299
| 0
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the rightmost mismatch for the window at
        `current_pos`, or -1 if the whole pattern matches there."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
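# --- Further illustrative usage (added) ---
# Overlapping occurrences are reported as well; classic example:
#   BoyerMooreSearch("AABAACAADAABAABA", "AABA").bad_character_heuristic()
#   -> [0, 9, 12]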
| 473
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case = {0: "batch"}
__snake_case = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__snake_case = {0: "batch", 1: "decoder_sequence"}
__snake_case = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case , __snake_case = self.num_layers
for i in range(SCREAMING_SNAKE_CASE ):
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
else:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super().outputs
else:
__snake_case = super(SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
__snake_case , __snake_case = self.num_layers
for i in range(SCREAMING_SNAKE_CASE ):
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
__snake_case = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Generate decoder inputs
__snake_case = seq_length if not self.use_past else 1
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
__snake_case = dict(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case , __snake_case = common_inputs["input_ids"].shape
__snake_case = common_inputs["decoder_input_ids"].shape[1]
__snake_case , __snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = decoder_seq_length + 3
__snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__snake_case = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )] , dim=1 )
__snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__snake_case , __snake_case = self.num_layers
__snake_case = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__snake_case = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) - min_num_layers
__snake_case = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
torch.zeros(SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
__snake_case = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case , __snake_case = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case , __snake_case = self.num_layers
__snake_case , __snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = common_inputs["attention_mask"].dtype
__snake_case = torch.cat(
[common_inputs["attention_mask"], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
__snake_case = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(SCREAMING_SNAKE_CASE )
]
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__snake_case = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = tokenizer.num_special_tokens_to_add(SCREAMING_SNAKE_CASE )
__snake_case = compute_effective_axis_dimension(
SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__snake_case = dict(tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE ) )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : int = -1 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
__snake_case = self._generate_dummy_inputs_for_causal_lm(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
else:
__snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super()._flatten_past_key_values_(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__snake_case = super(SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
| 473
| 1
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ :
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=True , a__=False , a__=False , a__=False , a__=2 , a__=99 , a__=0 , a__=32 , a__=5 , a__=4 , a__=0.1 , a__=0.1 , a__=5_12 , a__=2 , a__=0.02 , a__=2 , a__=4 , a__="last" , a__=True , a__=None , a__=0 , ) -> List[str]:
'''simple docstring'''
__snake_case :Any = parent
__snake_case :int = batch_size
__snake_case :Any = seq_length
__snake_case :Optional[Any] = is_training
__snake_case :Union[str, Any] = use_input_lengths
__snake_case :Optional[Any] = use_token_type_ids
__snake_case :Tuple = use_labels
__snake_case :str = gelu_activation
__snake_case :Optional[int] = sinusoidal_embeddings
__snake_case :Optional[int] = causal
__snake_case :List[str] = asm
__snake_case :List[Any] = n_langs
__snake_case :int = vocab_size
__snake_case :Union[str, Any] = n_special
__snake_case :Tuple = hidden_size
__snake_case :Tuple = num_hidden_layers
__snake_case :Tuple = num_attention_heads
__snake_case :str = hidden_dropout_prob
__snake_case :Union[str, Any] = attention_probs_dropout_prob
__snake_case :str = max_position_embeddings
__snake_case :Any = type_sequence_label_size
__snake_case :List[Any] = initializer_range
__snake_case :List[Any] = num_labels
__snake_case :int = num_choices
__snake_case :Optional[Any] = summary_type
__snake_case :List[str] = use_proj
__snake_case :List[str] = scope
__snake_case :List[str] = bos_token_id
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case :str = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case :List[str] = None
if self.use_input_lengths:
__snake_case :int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__snake_case :int = None
if self.use_token_type_ids:
__snake_case :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__snake_case :Dict = None
__snake_case :Union[str, Any] = None
__snake_case :Tuple = None
if self.use_labels:
__snake_case :Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case :Optional[int] = ids_tensor([self.batch_size] , 2 ).float()
__snake_case :Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__snake_case :Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowercase ( self ) -> Dict:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Optional[Any] = XLMModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :int = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ )
__snake_case :Optional[int] = model(lowerCamelCase_ , langs=lowerCamelCase_ )
__snake_case :Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Optional[Any]:
'''simple docstring'''
__snake_case :int = XLMWithLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :Tuple = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Dict:
'''simple docstring'''
__snake_case :Optional[Any] = XLMForQuestionAnsweringSimple(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :Optional[int] = model(lowerCamelCase_ )
__snake_case :Tuple = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
__snake_case :Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> str:
'''simple docstring'''
__snake_case :List[str] = XLMForQuestionAnswering(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :Union[str, Any] = model(lowerCamelCase_ )
__snake_case :Dict = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , )
__snake_case :Tuple = model(
lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> Optional[int]:
'''simple docstring'''
__snake_case :int = XLMForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :Union[str, Any] = model(lowerCamelCase_ )
__snake_case :str = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> List[str]:
'''simple docstring'''
__snake_case :Dict = self.num_labels
__snake_case :str = XLMForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ) -> List[str]:
'''simple docstring'''
__snake_case :Any = self.num_choices
__snake_case :Optional[Any] = XLMForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
__snake_case :Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case :str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case :Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case :Optional[int] = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :str = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
__snake_case :Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class snake_case__ ( a__ , a__ , a__ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : List[Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase : int = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase : Tuple = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ ) -> List[Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowercase ( self , a__ , a__ , a__=False ) -> List[str]:
'''simple docstring'''
__snake_case :Union[str, Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__snake_case :str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
__snake_case :Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Optional[Any] = XLMModelTester(self )
__snake_case :str = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=37 )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ )
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ )
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ )
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__=False , a__=1 ) -> Optional[int]:
'''simple docstring'''
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase_ ) )
self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
__snake_case :Tuple = min_length + idx + 1
__snake_case :int = min_length + idx + 1
__snake_case :Optional[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase_ ) )
def __lowercase ( self , a__ , a__ , a__ , a__ , a__ , a__=False , a__=1 ) -> int:
'''simple docstring'''
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(
[isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase_ ) , )
self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCamelCase_ ):
# adds PAD dummy token
__snake_case :Optional[Any] = min_length + idx + 1
__snake_case :Union[str, Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase_ ) , )
pass
@slow
def __lowercase ( self ) -> str:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case :Optional[Any] = XLMModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
@slow
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :Optional[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(lowerCamelCase_ )
__snake_case :List[Any] = torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCamelCase_ ) # the president
__snake_case :Union[str, Any] = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__snake_case :Tuple = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase_ )
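# --- Illustrative note (added) ---
# The tester above builds a deliberately tiny model; the same can be done directly,
# assuming `transformers` is installed:
#
#   from transformers import XLMConfig, XLMModel
#   config = XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4)
#   model = XLMModel(config)  # randomly initialised, suitable for smoke tests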
| 455
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
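# --- Illustrative usage (added) ---
# Assuming a plain-text vocab file with one token per line, as load_vocab_file expects:
#
#   tokenizer = EsmTokenizer("vocab.txt")
#   enc = tokenizer("MKTAYIAK")  # per-residue splitting relies on the token trie
#   tokenizer.convert_ids_to_tokens(enc["input_ids"])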
| 90
| 0
|
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
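    # Added illustrative check: floats work too, since the bucket index is int(i - min_value).
    assert bucket_sort([0.4, 1.2, 0.1]) == [0.1, 0.4, 1.2]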
| 44
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]:
snake_case = original_name.split('''.''' )[0]
snake_case = key.split('''.''' )
snake_case = int(key_list[key_list.index(a ) - 2] )
snake_case = int(key_list[key_list.index(a ) - 1] )
snake_case = orig_block_num - offset
snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def __UpperCamelCase ( a : Tuple ) ->Dict:
snake_case = OrderedDict()
snake_case , snake_case = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
snake_case = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
snake_case = key[: key.find('''proj''' )]
snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" )
snake_case = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
snake_case = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' )
if "norm2" in key:
snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
snake_case = key.replace('''head''' , '''classifier''' )
snake_case = value
return new_state_dict
def __UpperCamelCase ( ) ->Optional[int]:
snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case = Image.open(requests.get(a , stream=a ).raw )
return image
@torch.no_grad()
def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]:
snake_case = PoolFormerConfig()
# set attributes based on model_name
snake_case = '''huggingface/label-files'''
snake_case = model_name[-3:]
snake_case = 1000
snake_case = '''imagenet-1k-id2label.json'''
snake_case = (1, 1000)
# set config attributes
snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
snake_case = {int(a ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
if size == "s12":
snake_case = [2, 2, 6, 2]
snake_case = [64, 128, 320, 512]
snake_case = 4.0
snake_case = 0.9
elif size == "s24":
snake_case = [4, 4, 12, 4]
snake_case = [64, 128, 320, 512]
snake_case = 4.0
snake_case = 0.9
elif size == "s36":
snake_case = [6, 6, 18, 6]
snake_case = [64, 128, 320, 512]
snake_case = 4.0
snake_case = 1e-6
snake_case = 0.9
elif size == "m36":
snake_case = [6, 6, 18, 6]
snake_case = [96, 192, 384, 768]
snake_case = 4.0
snake_case = 1e-6
snake_case = 0.95
elif size == "m48":
snake_case = [8, 8, 24, 8]
snake_case = [96, 192, 384, 768]
snake_case = 4.0
snake_case = 1e-6
snake_case = 0.95
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor
snake_case = PoolFormerImageProcessor(crop_pct=a )
# Prepare image
snake_case = prepare_img()
snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
snake_case = torch.load(a , map_location=torch.device('''cpu''' ) )
# rename keys
snake_case = rename_keys(a )
# create HuggingFace model and load state dict
snake_case = PoolFormerForImageClassification(a )
model.load_state_dict(a )
model.eval()
# Define image processor
snake_case = PoolFormerImageProcessor(crop_pct=a )
snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
# forward pass
snake_case = model(a )
snake_case = outputs.logits
# define expected logit slices for different models
if size == "s12":
snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
snake_case = torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
snake_case = torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
snake_case = torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(f"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a , atol=1e-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(a ).mkdir(exist_ok=a )
model.save_pretrained(a )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(a )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_lowercase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
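# --- Illustrative addition (not part of the original conversion script) ---
# The conversion above renames checkpoint keys one substring at a time. A
# minimal, self-contained sketch of that idea, with hypothetical names:
from collections import OrderedDict


def rename_state_dict_keys(state_dict, replacements):
    """Apply each (old, new) substring pair to every key, preserving order."""
    renamed = OrderedDict()
    for key, value in state_dict.items():
        for old, new in replacements:
            key = key.replace(old, new)
        renamed[key] = value
    return renamed


# quick check with dummy values standing in for tensors
assert "classifier.weight" in rename_state_dict_keys({"head.weight": 0}, [("head", "classifier")])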
| 44
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Union[str, Any] = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
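# --- Background sketch (an illustration only, not the transformers implementation) ---
# Lazy imports like `_LazyModule` above can be built on PEP 562: a module-level
# __getattr__ that imports the submodule owning an attribute on first access.
import importlib


def make_lazy_getattr(package_name, import_structure):
    attr_to_module = {
        attr: module for module, attrs in import_structure.items() for attr in attrs
    }

    def __getattr__(name):
        if name in attr_to_module:
            submodule = importlib.import_module(f".{attr_to_module[name]}", package_name)
            return getattr(submodule, name)
        raise AttributeError(f"module {package_name!r} has no attribute {name!r}")

    return __getattr__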
| 3
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=32 , lowercase=3 , lowercase=4 , lowercase=[10, 20, 30, 40] , lowercase=[2, 2, 3, 2] , lowercase=True , lowercase=True , lowercase=37 , lowercase="gelu" , lowercase=10 , lowercase=0.0_2 , lowercase=["stage2", "stage3", "stage4"] , lowercase=[2, 3, 4] , lowercase=None , ) -> Any:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_stages
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_labels
lowerCamelCase_ = initializer_range
lowerCamelCase_ = out_features
lowerCamelCase_ = out_indices
lowerCamelCase_ = scope
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> Tuple:
lowerCamelCase_ = ConvNextModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> str:
lowerCamelCase_ = ConvNextForImageClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase ) -> str:
lowerCamelCase_ = ConvNextBackbone(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ = None
lowerCamelCase_ = ConvNextBackbone(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = ConvNextModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
pass
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(lowercase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
def check_hidden_states_output(lowercase , lowercase , lowercase ):
lowerCamelCase_ = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(lowercase , lowercase ) )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(lowercase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = ConvNextModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def lowerCamelCase_ ( ):
lowerCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(lowercase )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=lowercase , return_tensors="pt" ).to(lowercase )
# forward pass
with torch.no_grad():
lowerCamelCase_ = model(**lowercase )
# verify the logits
lowerCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
lowerCamelCase_ = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , snake_case_ ):
lowerCAmelCase__ = (ConvNextBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = ConvNextConfig
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = ConvNextModelTester(self )
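# --- Illustrative addition (not part of the test file) ---
# The shape assertions above rely on ConvNext's downsampling schedule: the stem
# divides the spatial size by 4 and each later stage halves it again, so stage
# i has spatial size image_size // (4 * 2**i).
def expected_feature_map_sizes(image_size, num_stages):
    return [image_size // (4 * 2 ** i) for i in range(num_stages)]


assert expected_feature_map_sizes(32, 4) == [8, 4, 2, 1]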
| 463
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = (DDPMScheduler,)
def _UpperCAmelCase ( self: Dict , **__lowerCAmelCase: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**__lowerCAmelCase )
return config
def _UpperCAmelCase ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] ) -> str:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )
def _UpperCAmelCase ( self: int ) -> Optional[Any]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[Any] ) -> Any:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] ) -> str:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , )
def _UpperCAmelCase ( self: Any ) -> Tuple:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def _UpperCAmelCase ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def _UpperCAmelCase ( self: int ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
__UpperCAmelCase = len(__lowerCAmelCase )
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
__UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__lowerCAmelCase ) ):
# 1. predict noise residual
__UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__UpperCAmelCase = pred_prev_sample
__UpperCAmelCase = torch.sum(torch.abs(__lowerCAmelCase ) )
__UpperCAmelCase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def _UpperCAmelCase ( self: Any ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(prediction_type="v_prediction" )
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
__UpperCAmelCase = len(__lowerCAmelCase )
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
__UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__lowerCAmelCase ) ):
# 1. predict noise residual
__UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
__UpperCAmelCase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__UpperCAmelCase = pred_prev_sample
__UpperCAmelCase = torch.sum(torch.abs(__lowerCAmelCase ) )
__UpperCAmelCase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def _UpperCAmelCase ( self: List[str] ) -> str:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
__UpperCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
__UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__lowerCAmelCase ):
if i == len(__lowerCAmelCase ) - 1:
__UpperCAmelCase = -1
else:
__UpperCAmelCase = timesteps[i + 1]
__UpperCAmelCase = scheduler.previous_timestep(__lowerCAmelCase )
__UpperCAmelCase = prev_t.item()
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
__UpperCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(__lowerCAmelCase , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
def _UpperCAmelCase ( self: Any ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
__UpperCAmelCase = [100, 87, 50, 1, 0]
__UpperCAmelCase = len(__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase )
def _UpperCAmelCase ( self: Any ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**__lowerCAmelCase )
__UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__lowerCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
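# --- Reference sketch (a standalone re-derivation, not the scheduler code) ---
# The `_get_variance` values asserted above follow from the DDPM posterior
# variance beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t) on a linear schedule.
import torch


def ddpm_variance(t, num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_prod_t_prev) / (1 - alphas_cumprod[t])


assert abs(ddpm_variance(0).item() - 0.0) < 1e-5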
| 286
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : List[Any] = AlbertTokenizer
lowerCAmelCase__ : int = AlbertTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : int = True
def _UpperCAmelCase ( self: Union[str, Any] ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase = AlbertTokenizer(__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self: Any , __lowerCAmelCase: Tuple ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = "this is a test"
__UpperCAmelCase = "this is a test"
return input_text, output_text
def _UpperCAmelCase ( self: Dict ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = "<pad>"
__UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "▁eloquent" )
self.assertEqual(len(__lowerCAmelCase ) , 30_000 )
def _UpperCAmelCase ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def _UpperCAmelCase ( self: str ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = "I was born in 92000, and this is falsé."
__UpperCAmelCase = tokenizer.tokenize(__lowerCAmelCase )
__UpperCAmelCase = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
__UpperCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = tokenizer.encode(__lowerCAmelCase )
__UpperCAmelCase = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = AlbertTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase )
__UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCAmelCase , ["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [48, 25, 21, 1_289] )
__UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
def _UpperCAmelCase ( self: str ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = AlbertTokenizer(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.encode("sequence builders" )
__UpperCAmelCase = tokenizer.encode("multi-sequence build" )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _UpperCAmelCase ( self: Optional[int] ) -> str:
'''simple docstring'''
# fmt: off
__UpperCAmelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
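# --- Side note as runnable code (independent of the test class above) ---
# SentencePiece pieces such as "▁eloquent" mark word starts with U+2581; a
# minimal detokenizer joins the pieces and turns that marker back into spaces.
def sentencepiece_detokenize(pieces):
    return "".join(pieces).replace("\u2581", " ").strip()


assert sentencepiece_detokenize(["\u2581this", "\u2581is", "\u2581a", "\u2581test"]) == "this is a test"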
| 286
| 1
|
from __future__ import annotations
from typing import TypedDict
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : str
__lowerCAmelCase : int
def UpperCamelCase ( lowercase_ ) -> list[str]:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""The parameter s type must be str.""" )
return [s[i:] + s[:i] for i in range(len(lowercase_ ) )]
def UpperCamelCase ( lowercase_ ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""The parameter s type must be str.""" )
if not s:
raise ValueError("""The parameter s must not be empty.""" )
lowercase__ : List[str] = all_rotations(lowercase_ )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
lowercase__ : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(lowercase_ ),
}
return response
def UpperCamelCase ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if not isinstance(lowercase_ , lowercase_ ):
raise TypeError("""The parameter bwt_string type must be str.""" )
if not bwt_string:
raise ValueError("""The parameter bwt_string must not be empty.""" )
try:
lowercase__ : Optional[Any] = int(lowercase_ )
except ValueError:
raise TypeError(
"""The parameter idx_original_string type must be int or"""
""" castable to int.""" )
if idx_original_string < 0:
raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
if idx_original_string >= len(lowercase_ ):
raise ValueError(
"""The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
lowercase__ : str = [""""""] * len(lowercase_ )
for _ in range(len(lowercase_ ) ):
for i in range(len(lowercase_ ) ):
lowercase__ : List[Any] = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
lowerCamelCase__ : Tuple = """Provide a string that I will generate its BWT transform: """
lowerCamelCase__ : Dict = input(entry_msg).strip()
lowerCamelCase__ : int = bwt_transform(s)
print(
f'''Burrows Wheeler transform for string \'{s}\' results '''
f'''in \'{result["bwt_string"]}\''''
)
lowerCamelCase__ : List[str] = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
print(
f'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
f'''we get original string \'{original_string}\''''
)
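# --- Compact round-trip check (a sketch restating the algorithm above with plain names) ---
# The BWT is invertible given the index of the original rotation; the naive
# inverse rebuilds the sorted rotation table one column at a time.
def _bwt_roundtrip(s: str) -> bool:
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    bwt = "".join(row[-1] for row in rotations)
    idx = rotations.index(s)
    table = [""] * len(s)
    for _ in range(len(s)):
        table = sorted(bwt[i] + table[i] for i in range(len(s)))
    return table[idx] == s


assert _bwt_roundtrip("banana")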
| 12
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__magic_name__ : str ={
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__magic_name__ : Tuple ={
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
__magic_name__ = (images / 2 + 0.5).clamp(0 , 1 )
__magic_name__ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__magic_name__ = numpy_to_pil(lowerCamelCase_ )
return images
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if images.ndim == 3:
__magic_name__ = images[None, ...]
__magic_name__ = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
__magic_name__ = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
__magic_name__ = [Image.fromarray(lowerCamelCase_ ) for image in images]
return pil_images
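# --- Usage sketch (standalone; mirrors the second helper above rather than
# calling the obfuscated names, which are placeholders here) ---
import numpy as np


def demo_numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    return [Image.fromarray(img) for img in images]


assert len(demo_numpy_to_pil(np.random.rand(2, 8, 8, 3))) == 2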
| 664
| 0
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["torch", "torchsde"]
def __init__( self : Optional[int], *_UpperCAmelCase : Optional[Any], **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
requires_backends(self, ["torch", "torchsde"] )
@classmethod
def A_ ( cls : Any, *_UpperCAmelCase : str, **_UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ["torch", "torchsde"] )
@classmethod
def A_ ( cls : int, *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ["torch", "torchsde"] )
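# --- How the dummy-object pattern works (a self-contained sketch; the real
# `requires_backends` lives in `..utils` and may differ) ---
# Importing the package without the optional dependency succeeds, while any
# attempt to construct or use the dummy class raises with a clear message.
import importlib.util


def demo_requires_backends(obj, backends):
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")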
| 157
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Union[str, Any] = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157
| 1
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase : Optional[int] = 10
def A__ ( __lowerCAmelCase : list[int] ):
lowerCamelCase__ = 1
lowerCamelCase__ = max(__lowerCAmelCase )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCamelCase__ = [[] for _ in range(__lowerCAmelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCamelCase__ = int((i / placement) % RADIX )
buckets[tmp].append(__lowerCAmelCase )
# put each buckets' contents into list_of_ints
lowerCamelCase__ = 0
for b in range(__lowerCAmelCase ):
for i in buckets[b]:
lowerCamelCase__ = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
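# --- Spot check (illustrative addition): a standalone restatement of the LSD
# radix sort above with plain names, verified against Python's sorted(). ---
def radix_sort_demo(values, radix=10):
    placement = 1
    while placement <= max(values):
        buckets = [[] for _ in range(radix)]
        for v in values:
            buckets[(v // placement) % radix].append(v)
        values = [v for bucket in buckets for v in bucket]
        placement *= radix
    return values


assert radix_sort_demo([170, 45, 75, 90, 802, 24, 2, 66]) == sorted([170, 45, 75, 90, 802, 24, 2, 66])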
| 50
|
from abc import ABC, abstractmethod
from typing import List, Optional
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[Any] ) -> Dict:
# sanity-check that the subclass implements the Constraint interface consistently
self.test()
def lowerCamelCase__( self :Tuple ) -> int:
a__ = 0
a__ = False
while not completed:
if counter == 1:
self.reset()
a__ = self.advance()
if not self.does_advance(__snake_case ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
a__ , a__ , a__ = self.update(__snake_case )
counter += 1
if counter > 10_000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def lowerCamelCase__( self :int ) -> str:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Tuple ,__snake_case :int ) -> Dict:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :int ,__snake_case :int ) -> List[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :int ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str=False ) -> List[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[Any] ,__snake_case :List[int] ) -> Optional[Any]:
super(__snake_case ,self ).__init__()
if not isinstance(__snake_case ,__snake_case ) or len(__snake_case ) == 0:
raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(__snake_case ,__snake_case ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
a__ = token_ids
a__ = len(self.token_ids )
a__ = -1 # the index of the currently fulfilled step
a__ = False
def lowerCamelCase__( self :int ) -> str:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__( self :Dict ,__snake_case :int ) -> Optional[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> Optional[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = False
a__ = False
a__ = False
if self.does_advance(__snake_case ):
self.fulfilled_idx += 1
a__ = True
if self.fulfilled_idx == (self.seqlen - 1):
a__ = True
a__ = completed
else:
# failed to make progress.
a__ = True
self.reset()
return stepped, completed, reset
def lowerCamelCase__( self :Optional[int] ) -> Tuple:
a__ = False
a__ = 0
def lowerCamelCase__( self :str ) -> Optional[Any]:
return self.seqlen - (self.fulfilled_idx + 1)
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int]=False ) -> Tuple:
a__ = PhrasalConstraint(self.token_ids )
if stateful:
a__ = self.seqlen
a__ = self.fulfilled_idx
a__ = self.completed
return new_constraint
class snake_case_ :
def __init__( self :List[str] ,__snake_case :List[List[int]] ,__snake_case :Union[str, Any]=True ) -> int:
a__ = max([len(__snake_case ) for one in nested_token_ids] )
a__ = {}
for token_ids in nested_token_ids:
a__ = root
for tidx, token_id in enumerate(__snake_case ):
if token_id not in level:
a__ = {}
a__ = level[token_id]
if no_subsets and self.has_subsets(__snake_case ,__snake_case ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F' {nested_token_ids}.' )
a__ = root
def lowerCamelCase__( self :Dict ,__snake_case :Any ) -> Optional[int]:
a__ = self.trie
for current_token in current_seq:
a__ = start[current_token]
a__ = list(start.keys() )
return next_tokens
def lowerCamelCase__( self :Optional[int] ,__snake_case :int ) -> List[Any]:
a__ = self.next_tokens(__snake_case )
return len(__snake_case ) == 0
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ) -> List[str]:
a__ = list(root.values() )
if len(__snake_case ) == 0:
return 1
else:
return sum([self.count_leaves(__snake_case ) for nn in next_nodes] )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Any ,__snake_case :Union[str, Any] ) -> Any:
a__ = self.count_leaves(__snake_case )
return len(__snake_case ) != leaf_count
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[int] ,__snake_case :List[List[int]] ) -> Optional[int]:
super(__snake_case ,self ).__init__()
if not isinstance(__snake_case ,__snake_case ) or len(__snake_case ) == 0:
raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(__snake_case ,__snake_case ) for token_ids in nested_token_ids ):
raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(__snake_case ,__snake_case ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
a__ = DisjunctiveTrie(__snake_case )
a__ = nested_token_ids
a__ = self.trie.max_height
a__ = []
a__ = False
def lowerCamelCase__( self :Tuple ) -> Any:
a__ = self.trie.next_tokens(self.current_seq )
if len(__snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ) -> Dict:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> Optional[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__snake_case )}' )
a__ = False
a__ = False
a__ = False
if self.does_advance(__snake_case ):
self.current_seq.append(__snake_case )
a__ = True
else:
a__ = True
self.reset()
a__ = self.trie.reached_leaf(self.current_seq )
a__ = completed
return stepped, completed, reset
def lowerCamelCase__( self :Any ) -> Optional[Any]:
a__ = False
a__ = []
def lowerCamelCase__( self :int ) -> Dict:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowerCamelCase__( self :str ,__snake_case :Any=False ) -> Tuple:
a__ = DisjunctiveConstraint(self.token_ids )
if stateful:
a__ = self.seqlen
a__ = self.current_seq
a__ = self.completed
return new_constraint
class snake_case_ :
def __init__( self :Tuple ,__snake_case :List[Constraint] ) -> int:
a__ = constraints
# max # of steps required to fulfill a given constraint
a__ = max([c.seqlen for c in constraints] )
a__ = len(__snake_case )
a__ = False
self.init_state()
def lowerCamelCase__( self :Dict ) -> Optional[int]:
a__ = []
a__ = None
a__ = [constraint.copy(stateful=__snake_case ) for constraint in self.constraints]
def lowerCamelCase__( self :Dict ) -> int:
a__ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowerCamelCase__( self :Dict ) -> str:
a__ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
a__ = constraint.advance()
if isinstance(__snake_case ,__snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case ,__snake_case ):
token_list.extend(__snake_case )
else:
a__ = self.inprogress_constraint.advance()
if isinstance(__snake_case ,__snake_case ):
token_list.append(__snake_case )
elif isinstance(__snake_case ,__snake_case ):
token_list.extend(__snake_case )
if len(__snake_case ) == 0:
return None
else:
return token_list
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[List[int]] ) -> Tuple:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
a__ , a__ = self.add(__snake_case )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowerCamelCase__( self :List[Any] ,__snake_case :int ) -> List[Any]:
if not isinstance(__snake_case ,__snake_case ):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
a__ , a__ = False, False
if self.completed:
a__ = True
a__ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current constraint, simply update its state.
a__ , a__ , a__ = self.inprogress_constraint.update(__snake_case )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__snake_case ) )
a__ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
a__ = None
if len(self.pending_constraints ) == 0:
# we're done!
a__ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards
# any constraint in our pending list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__snake_case ):
a__ , a__ , a__ = pending_constraint.update(__snake_case )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(__snake_case )
a__ = None
if not complete and stepped:
a__ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
a__ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
a__ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowerCamelCase__( self :int ,__snake_case :Optional[int]=True ) -> Dict:
a__ = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects
# throughout this process, so they are still in their initialization state.
if stateful:
a__ = [
constraint.copy(stateful=__snake_case ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
a__ = self.inprogress_constraint.copy(stateful=__snake_case )
a__ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
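# --- Standalone sketch of the trie idea behind `DisjunctiveTrie` above (plain
# dicts, hypothetical helper names) ---
# Each path through nested dicts is one allowed token-id sequence; `next_tokens`
# returns the continuations of a prefix, and an empty result marks a leaf.
def build_token_trie(sequences):
    root = {}
    for seq in sequences:
        level = root
        for token_id in seq:
            level = level.setdefault(token_id, {})
    return root


def next_tokens(trie, prefix):
    level = trie
    for token_id in prefix:
        level = level[token_id]
    return list(level.keys())


_trie = build_token_trie([[1, 2, 3], [1, 2, 4], [5]])
assert sorted(next_tokens(_trie, [1, 2])) == [3, 4]
assert next_tokens(_trie, [5]) == []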
| 335
| 0
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = ["""image_processor"""]
lowerCAmelCase__ = """SamImageProcessor"""
def __init__( self , a__ ):
super().__init__(a__ )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = -10
_UpperCAmelCase = self.image_processor.size['longest_edge']
def __call__( self , a__=None , a__=None , a__=None , a__=None , a__ = None , **a__ , ):
_UpperCAmelCase = self.image_processor(
a__ , return_tensors=a__ , **a__ , )
# pop arguments that are not used in the forward pass but are needed nevertheless
_UpperCAmelCase = encoding_image_processor['original_sizes']
if hasattr(a__ , 'numpy' ): # Checks if Torch or TF tensor
_UpperCAmelCase = original_sizes.numpy()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._check_and_preprocess_points(
input_points=a__ , input_labels=a__ , input_boxes=a__ , )
_UpperCAmelCase = self._normalize_and_convert(
a__ , a__ , input_points=a__ , input_labels=a__ , input_boxes=a__ , return_tensors=a__ , )
return encoding_image_processor
def __A ( self , a__ , a__ , a__=None , a__=None , a__=None , a__="pt" , ):
if input_points is not None:
if len(a__ ) != len(a__ ):
_UpperCAmelCase = [
self._normalize_coordinates(self.target_size , a__ , original_sizes[0] ) for point in input_points
]
else:
_UpperCAmelCase = [
self._normalize_coordinates(self.target_size , a__ , a__ )
for point, original_size in zip(a__ , a__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_UpperCAmelCase , _UpperCAmelCase = self._pad_points_and_labels(a__ , a__ )
_UpperCAmelCase = np.array(a__ )
if input_labels is not None:
_UpperCAmelCase = np.array(a__ )
if input_boxes is not None:
if len(a__ ) != len(a__ ):
_UpperCAmelCase = [
self._normalize_coordinates(self.target_size , a__ , original_sizes[0] , is_bounding_box=a__ )
for box in input_boxes
]
else:
_UpperCAmelCase = [
self._normalize_coordinates(self.target_size , a__ , a__ , is_bounding_box=a__ )
for box, original_size in zip(a__ , a__ )
]
_UpperCAmelCase = np.array(a__ )
if input_boxes is not None:
if return_tensors == "pt":
_UpperCAmelCase = torch.from_numpy(a__ )
# boxes batch size of 1 by default
_UpperCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_UpperCAmelCase = tf.convert_to_tensor(a__ )
# boxes batch size of 1 by default
_UpperCAmelCase = tf.expand_dims(a__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_UpperCAmelCase = torch.from_numpy(a__ )
# point batch size of 1 by default
_UpperCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_UpperCAmelCase = tf.convert_to_tensor(a__ )
# point batch size of 1 by default
_UpperCAmelCase = tf.expand_dims(a__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
_UpperCAmelCase = torch.from_numpy(a__ )
# point batch size of 1 by default
_UpperCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_UpperCAmelCase = tf.convert_to_tensor(a__ )
# point batch size of 1 by default
_UpperCAmelCase = tf.expand_dims(a__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def __A ( self , a__ , a__ ):
_UpperCAmelCase = max([point.shape[0] for point in input_points] )
_UpperCAmelCase = []
for i, point in enumerate(a__ ):
if point.shape[0] != expected_nb_points:
_UpperCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_UpperCAmelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(a__ )
_UpperCAmelCase = processed_input_points
return input_points, input_labels
def __A ( self , a__ , a__ , a__ , a__=False ):
_UpperCAmelCase , _UpperCAmelCase = original_size
_UpperCAmelCase , _UpperCAmelCase = self.image_processor._get_preprocess_shape(a__ , longest_edge=a__ )
_UpperCAmelCase = deepcopy(a__ ).astype(a__ )
if is_bounding_box:
_UpperCAmelCase = coords.reshape(-1 , 2 , 2 )
_UpperCAmelCase = coords[..., 0] * (new_w / old_w)
_UpperCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_UpperCAmelCase = coords.reshape(-1 , 4 )
return coords
def __A ( self , a__=None , a__=None , a__=None , ):
if input_points is not None:
if hasattr(a__ , 'numpy' ): # Checks for TF or Torch tensor
_UpperCAmelCase = input_points.numpy().tolist()
if not isinstance(a__ , a__ ) or not isinstance(input_points[0] , a__ ):
raise ValueError('Input points must be a list of lists of floating point coordinates.' )
_UpperCAmelCase = [np.array(a__ ) for input_point in input_points]
else:
_UpperCAmelCase = None
if input_labels is not None:
if hasattr(a__ , 'numpy' ):
_UpperCAmelCase = input_labels.numpy().tolist()
if not isinstance(a__ , a__ ) or not isinstance(input_labels[0] , a__ ):
raise ValueError('Input labels must be a list of lists of integers.' )
_UpperCAmelCase = [np.array(a__ ) for label in input_labels]
else:
_UpperCAmelCase = None
if input_boxes is not None:
if hasattr(a__ , 'numpy' ):
_UpperCAmelCase = input_boxes.numpy().tolist()
if (
not isinstance(a__ , a__ )
or not isinstance(input_boxes[0] , a__ )
or not isinstance(input_boxes[0][0] , a__ )
):
raise ValueError('Input boxes must be a list of lists of lists of floating point coordinates.' )
_UpperCAmelCase = [np.array(a__ ).astype(np.floataa ) for box in input_boxes]
else:
_UpperCAmelCase = None
return input_points, input_labels, input_boxes
@property
def __A ( self ):
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(a__ ) )
def __A ( self , *a__ , **a__ ):
return self.image_processor.post_process_masks(*a__ , **a__ )
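# --- The coordinate math in `_normalize_coordinates` above, restated as a
# minimal standalone sketch (illustrative names) ---
# Points given in the original image frame are scaled by the per-axis resize ratio.
import numpy as np


def rescale_points(coords, original_hw, resized_hw):
    old_h, old_w = original_hw
    new_h, new_w = resized_hw
    coords = np.array(coords, dtype=np.float64)
    coords[..., 0] *= new_w / old_w  # x
    coords[..., 1] *= new_h / old_h  # y
    return coords


assert rescale_points([[100.0, 50.0]], (200, 400), (100, 200)).tolist() == [[50.0, 25.0]]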
| 494
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = """trocr"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self , a__=5_02_65 , a__=10_24 , a__=12 , a__=16 , a__=40_96 , a__="gelu" , a__=5_12 , a__=0.1 , a__=0.0 , a__=0.0 , a__=2 , a__=0.02 , a__=0.0 , a__=True , a__=False , a__=True , a__=True , a__=1 , a__=0 , a__=2 , **a__ , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = d_model
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = activation_function
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = init_std
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = use_cache
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_learned_position_embeddings
_UpperCAmelCase = layernorm_embedding
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , decoder_start_token_id=a__ , **a__ , )
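# --- Sketch of the `attribute_map` aliasing visible above (a toy stand-in, not
# the PretrainedConfig implementation) ---
# Generic names like `hidden_size` are routed to model-specific attributes such
# as `d_model`, so downstream code can stay model-agnostic.
class _DemoConfig:
    attribute_map = {"hidden_size": "d_model", "num_hidden_layers": "decoder_layers"}

    def __init__(self, d_model=1024, decoder_layers=12):
        self.d_model = d_model
        self.decoder_layers = decoder_layers

    def __getattr__(self, name):
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)


assert _DemoConfig().hidden_size == 1024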
| 494
| 1
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class UpperCAmelCase_ ( snake_case ):
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]:
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
__lowercase : Dict = {}
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict:
__lowercase : List[Any] = super().add_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
''' `placeholder_token` that is not already in the tokenizer.''' )
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=1 , **UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Optional[int] = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
output.append(UpperCamelCase_ )
else:
__lowercase : Optional[int] = []
for i in range(UpperCamelCase_ ):
__lowercase : List[Any] = placeholder_token + F"""_{i}"""
self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
output.append(UpperCamelCase_ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
__lowercase : Optional[int] = output
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 ) -> Optional[int]:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowercase : Tuple = []
for i in range(len(UpperCamelCase_ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase_ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__lowercase : List[Any] = self.token_map[placeholder_token]
__lowercase : List[str] = tokens[: 1 + int(len(UpperCamelCase_ ) * prop_tokens_to_load )]
if vector_shuffle:
__lowercase : Optional[Any] = copy.copy(UpperCamelCase_ )
random.shuffle(UpperCamelCase_ )
__lowercase : Union[str, Any] = text.replace(UpperCamelCase_ , ''' '''.join(UpperCamelCase_ ) )
return text
def __call__( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> Optional[int]:
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , )
def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> Dict:
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , )
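# --- What the multi-vector placeholder logic above does, as a tiny standalone
# sketch (hypothetical names) ---
# One placeholder token expands to N numbered sub-tokens before encoding, so a
# single learned concept can own several embedding vectors.
def expand_placeholder(text, placeholder, num_vec_per_token):
    if num_vec_per_token == 1:
        return text
    expansion = " ".join(f"{placeholder}_{i}" for i in range(num_vec_per_token))
    return text.replace(placeholder, expansion)


assert expand_placeholder("a photo of <cat>", "<cat>", 3) == "a photo of <cat>_0 <cat>_1 <cat>_2"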
| 76
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs( self):
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
choice_labels = ids_tensor([self.batch_size] , self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , ):
_A : int = BioGptForCausalLM(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
model = BioGptModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output , past = model(input_ids , attention_mask=attn_mask).to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,) , half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device)] , dim=1 , )
# get two different outputs
output_from_no_past = model(next_input_ids , attention_mask=attn_mask)['last_hidden_state']
output_from_past = model(next_tokens , past_key_values=past , attention_mask=attn_mask)['last_hidden_state']
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3))
def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
model = BioGptModel(config=config).to(torch_device).eval()
attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
# first forward pass
outputs = model(input_ids , attention_mask=attention_mask , use_cache=True)
output , past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1)
output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)['last_hidden_state']
output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[
'last_hidden_state'
]
# select random slice
random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1e-3))
def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False):
model = BioGptForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids , labels=input_ids)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_biogpt_weight_initialization( self , config , *args):
model = BioGptModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
def create_and_check_biogpt_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
config.num_labels = self.num_labels
model = BioGptForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def prepare_config_and_inputs_for_common( self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
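# Standalone sketch of the cached-vs-uncached consistency check that the tester methods above
# exercise (illustrative; it mirrors the pattern rather than copying any single method).
def _kv_cache_consistency_sketch():
    import torch
    from transformers import BioGptForCausalLM, BioGptTokenizer

    tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
    model = BioGptForCausalLM.from_pretrained("microsoft/biogpt").eval()
    inputs = tokenizer("COVID-19 is", return_tensors="pt")
    with torch.no_grad():
        # full forward pass over the whole sequence
        full = model(input_ids=inputs.input_ids)
        # incremental pass: prefix first, then the last token reusing the cache
        prefix = model(input_ids=inputs.input_ids[:, :-1], use_cache=True)
        step = model(input_ids=inputs.input_ids[:, -1:], past_key_values=prefix.past_key_values)
    # logits at the final position must agree between the two code paths
    assert torch.allclose(full.logits[:, -1], step.logits[:, -1], atol=1e-3)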
@require_torch
class BioGptModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
test_pruning = False
def setUp( self):
self.model_tester = BioGptModelTester(self)
self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37)
def test_config( self):
self.config_tester.run_common_tests()
def test_model( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
config_and_inputs[0].position_embedding_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_biogpt_model_attention_mask_past( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
def test_biogpt_gradient_checkpointing( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True)
def test_biogpt_model_past_with_large_inputs( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
def test_biogpt_weight_initialization( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
def test_biogpt_token_classification_model( self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def test_batch_generation( self):
model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
model.to(torch_device)
tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
tokenizer.padding_side = 'left'
# define PAD token = EOS token
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id
# use different length sentences to test batching
sentences = [
'Hello, my dog is a little',
'Today, I',
]
inputs = tokenizer(sentences , return_tensors='pt' , padding=True)
input_ids = inputs['input_ids'].to(torch_device)
outputs = model.generate(
input_ids=input_ids , attention_mask=inputs['attention_mask'].to(torch_device) , )
inputs_non_padded = tokenizer(sentences[0] , return_tensors='pt').input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1] , return_tensors='pt').input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True)
expected_output_sentence = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(expected_output_sentence , batch_out_sentence)
self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence])
@slow
def test_model_from_pretrained( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BioGptModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_biogpt_sequence_classification_model( self):
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def test_biogpt_sequence_classification_model_for_multi_label( self):
config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.num_labels = 3
config.problem_type = 'multi_label_classification'
input_ids = input_dict['input_ids']
attention_mask = input_ids.ne(1).to(torch_device)
sequence_labels = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
model = BioGptForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
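# Illustrative sketch of the multi-label setup exercised above: setting `problem_type`
# switches the sequence-classification head to BCEWithLogitsLoss, so float multi-hot
# labels are accepted. The input shapes here are assumptions chosen for the example.
def _multi_label_sketch():
    import torch
    from transformers import BioGptConfig, BioGptForSequenceClassification

    config = BioGptConfig(num_labels=3, problem_type="multi_label_classification")
    model = BioGptForSequenceClassification(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (1, 7))
    labels = torch.tensor([[1.0, 0.0, 1.0]])  # multi-hot float labels
    return model(input_ids, labels=labels).loss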
@require_torch
class BioGptModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_biogpt( self):
model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
output = model(input_ids)[0]
vocab_size = 42384
expected_shape = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , expected_shape)
expected_slice = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@slow
def test_biogpt_generation( self):
tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
model.to(torch_device)
torch.manual_seed(0)
tokenized = tokenizer('COVID-19 is' , return_tensors='pt').to(torch_device)
output_ids = model.generate(
**tokenized , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=True , )
output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True)
EXPECTED_OUTPUT_STR = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(output_str , EXPECTED_OUTPUT_STR)
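# Why left padding matters for the batching test above (illustrative note): decoder-only
# models continue generating from the last position, so padding has to sit on the left
# for the final token of each row to be a real input token.
def _left_padding_sketch(model, tokenizer):
    tokenizer.padding_side = "left"
    tokenizer.pad_token = tokenizer.eos_token
    batch = tokenizer(["Hello, my dog is a little", "Today, I"], return_tensors="pt", padding=True)
    return model.generate(input_ids=batch.input_ids, attention_mask=batch.attention_mask)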
| 128
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
return image
def create_rename_keys( config ):
rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
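# Tiny worked example of how the (src, dest) pairs above are consumed; `rename_key` below
# applies exactly this pop-and-reassign on the real state dict (illustrative).
def _rename_example():
    toy = {"visual_encoder.cls_token": 1}
    toy["vision_model.embeddings.class_embedding"] = toy.pop("visual_encoder.cls_token")
    assert toy == {"vision_model.embeddings.class_embedding": 1}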
def rename_key( dct , old , new ):
val = dct.pop(old )
dct[new] = val
def read_in_q_v_bias( state_dict , config ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config( model_name , eos_token_id=None ):
image_size = 364 if """coco""" in model_name else 224
vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
elif "opt-6.7b" in model_name:
text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
elif "t5-xl" in model_name:
text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
config = BlipaConfig(vision_config=vision_config , text_config=text_config )
return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
tokenizer = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
hf_model = BlipaForConditionalGeneration(config ).eval()
model_name_to_original = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
lavis_name , model_type = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
device = """cuda""" if torch.cuda.is_available() else """cpu"""
original_model , vis_processors , _ = load_model_and_preprocess(
name=lavis_name , model_type=model_type , is_eval=True , device=device )
original_model.eval()
print("""Done!""" )
# update state dict keys
state_dict = original_model.state_dict()
rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
val = state_dict.pop(key )
if key.startswith("""Qformer.bert""" ):
key = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
key = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
key = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
key = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
key = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
key = key.replace("""t5""" , """language""" )
state_dict[key] = val
# read in qv biases
read_in_q_v_bias(state_dict , config )
missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
assert len(missing_keys ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
image = load_demo_image()
original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
# create processor
image_processor = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
# make sure processor creates exact same pixel values
assert torch.allclose(pixel_values , original_pixel_values )
original_model.to(device )
hf_model.to(device )
with torch.no_grad():
if "opt" in model_name:
original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
logits = hf_model(pixel_values , input_ids ).logits
else:
original_logits = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
expected_slice_logits = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
expected_slice_logits = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
else:
# cast to same type
target_dtype = logits.dtype
assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
prompt = ""
input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
original_outputs = original_model.generate({"""image""": original_pixel_values} )
outputs = hf_model.generate(
pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , original_outputs )
prompt_length = input_ids.shape[1]
output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
output_text = [text.strip() for text in output_text]
print("""HF generation:""" , output_text )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(pytorch_dump_folder_path )
hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
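# Hypothetical usage of a converted checkpoint (illustrative; the dump folder path and the
# generation arguments are assumptions, not part of the conversion script itself):
def _demo_converted_checkpoint(dump_folder):
    processor = BlipaProcessor.from_pretrained(dump_folder)
    model = BlipaForConditionalGeneration.from_pretrained(dump_folder)
    inputs = processor(images=load_demo_image(), return_tensors="pt")
    generated = model.generate(**inputs, max_length=30)
    return processor.batch_decode(generated, skip_special_tokens=True)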
| 701
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL
@property
def dummy_input( self ):
"""simple docstring"""
batch_size = 4
num_channels = 3
sizes = (32, 32)
prng_key = jax.random.PRNGKey(0 )
image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def prepare_init_args_and_inputs_for_common( self ):
"""simple docstring"""
init_dict = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
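# Rough sketch of how a Flax model tester mixin typically consumes these helpers
# (illustrative; the exact mixin internals are an assumption):
def _flax_vae_forward_sketch(test_case):
    init_dict, inputs_dict = test_case.prepare_init_args_and_inputs_for_common()
    model = FlaxAutoencoderKL(**init_dict)
    variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"])
    return model.apply(variables, inputs_dict["sample"])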
| 507
| 0
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
ETAOIN = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
LETTERS = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message )-> dict:
'''simple docstring'''
letter_count = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def get_item_at_index_zero( x ):
'''simple docstring'''
return x[0]
def get_frequency_order( message )-> str:
'''simple docstring'''
letter_to_freq = get_letter_count(message )
freq_to_letter = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(letter )
freq_to_letter_str = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find ,reverse=True )
freq_to_letter_str[freq] = "".join(freq_to_letter[freq] )
freq_pairs = list(freq_to_letter_str.items() )
freq_pairs.sort(key=get_item_at_index_zero ,reverse=True )
freq_order = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(freq_order )
def english_freq_match_score( message )-> int:
'''simple docstring'''
freq_order = get_frequency_order(message )
match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
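# Worked example (illustrative): ordinary English text should place common letters near
# the front of the frequency order and score close to the maximum of 12.
def _frequency_demo():
    sample = "ALAN MATHISON TURING WAS AN ENGLISH MATHEMATICIAN AND COMPUTER SCIENTIST"
    return get_frequency_order(sample), english_freq_match_score(sample)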
| 393
|
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class WavaVecaPhonemeCTCTokenizerTest ( TokenizerTesterMixin ,unittest.TestCase ):
tokenizer_class = WavaVecaPhonemeCTCTokenizer
test_rust_tokenizer = False
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
super().setUp()
vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
self.special_tokens_map = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(vocab_tokens ) + '''\n''' )
def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
"""simple docstring"""
toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
if max_length is not None and len(toks ) > max_length:
toks = toks[:max_length]
if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
while len(toks ) < min_length:
toks = toks + toks
# toks_str = [t[1] for t in toks]
toks_ids = [t[0] for t in toks]
# Ensure consistency
output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
if " " not in output_txt and len(toks_ids ) > 1:
output_txt = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
)
if with_prefix_space:
output_txt = ''' ''' + output_txt
output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
return output_txt, output_ids
def get_tokenizer( self , **kwargs ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
token_ids = tokenizer('''m xxx ɪ''' , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
token_ids = tokenizer('''m aaa ɪ ccc''' , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
token_ids = tokenizer('''maɪ c''' , do_phonemize=False ).input_ids
self.assertEqual(token_ids , [3, 200] ) # mai should be <unk> (=3)
def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : List[str] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Union[str, Any] = '''Hello how are you'''
lowerCamelCase_ : Optional[int] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Dict = '''Hello how are you'''
lowerCamelCase_ : int = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase_ ).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_ ).input_ids )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : str = '''Hello how are you'''
lowerCamelCase_ : Tuple = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
lowerCamelCase_ : Any = tokenizer.decode(tokenizer(UpperCamelCase_ ).input_ids )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCamelCase_ : Dict = tokenizer.decode(sample_ids[0] )
lowerCamelCase_ : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch_tokens[0] )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : Dict = '''Hello how are you'''
lowerCamelCase_ : Union[str, Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(UpperCamelCase_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : Optional[int] = '''Hello how are you'''
lowerCamelCase_ : List[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(UpperCamelCase_ ).input_ids , tokenizer(UpperCamelCase_ , do_phonemize=UpperCamelCase_ ).input_ids )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
lowerCamelCase_ : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCamelCase_ : Dict = tokenizer.decode(sample_ids[0] )
lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch_tokens[0] )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
lowerCamelCase_ : Any = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=UpperCamelCase_ )
lowerCamelCase_ : Tuple = tokenizer.batch_decode(UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , batch_tokens[0] )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : Optional[Any] = '''Hello how are you'''
lowerCamelCase_ : Optional[Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
lowerCamelCase_ : Any = tokenizer.decode(tokenizer(UpperCamelCase_ ).input_ids , filter_word_delimiter_token=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
lowerCamelCase_ : int = '''Hello how are you'''
lowerCamelCase_ : Union[str, Any] = tokenizer.phonemize(UpperCamelCase_ , phonemizer_lang='''en-us''' )
lowerCamelCase_ : Optional[Any] = tokenizer.decode(tokenizer(UpperCamelCase_ ).input_ids , filter_word_delimiter_token=UpperCamelCase_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=UpperCamelCase_ )
lowerCamelCase_ : Any = '''Hello how are you'''
lowerCamelCase_ : Any = tokenizer(UpperCamelCase_ , phonemizer_lang='''en-us''' ).input_ids
lowerCamelCase_ : Dict = tokenizer(UpperCamelCase_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : int = tokenizer.decode(UpperCamelCase_ )
lowerCamelCase_ : Any = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(UpperCamelCase_ , '''ɛ l o h aʊ a ʁ j u''' )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
lowerCamelCase_ : Optional[int] = '''Hello how Are you'''
lowerCamelCase_ : Dict = '''hello how are you'''
lowerCamelCase_ : List[str] = tokenizer(UpperCamelCase_ ).input_ids
lowerCamelCase_ : List[Any] = tokenizer(UpperCamelCase_ ).input_ids
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
lowerCamelCase_ : Optional[int] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCamelCase_ : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
def get_from_offsets( offsets , key ):
"""simple docstring"""
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
lowerCamelCase_ : List[Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCamelCase_ : Union[str, Any] = tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ , filter_word_delimiter_token=UpperCamelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ : int = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(UpperCamelCase_ : str , UpperCamelCase_ : int ):
self.assertTrue(isinstance(UpperCamelCase_ , UpperCamelCase_ ) )
self.assertTrue(isinstance(outputs_list[0] , UpperCamelCase_ ) )
# transform list to ModelOutput
lowerCamelCase_ : Optional[Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple ):
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
[recursive_check(la , lb ) for la, lb in zip(UpperCamelCase_ , UpperCamelCase_ )]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
lowerCamelCase_ : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCamelCase_ : Tuple = tokenizer.batch_decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = [tokenizer.decode(UpperCamelCase_ , output_char_offsets=UpperCamelCase_ ) for ids in sample_ids]
check_list_tuples_equal(UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : int = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : List[str] = tokenizer.vocab_size
lowerCamelCase_ : Optional[int] = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCamelCase_ : Tuple = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
lowerCamelCase_ : Dict = tokenizer.add_tokens(UpperCamelCase_ )
lowerCamelCase_ : Dict = tokenizer.vocab_size
lowerCamelCase_ : Dict = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_ ) )
lowerCamelCase_ : Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCamelCase_ : List[Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
lowerCamelCase_ : List[Any] = tokenizer.add_special_tokens(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = tokenizer.vocab_size
lowerCamelCase_ : Dict = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_ ) )
lowerCamelCase_ : Union[str, Any] = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
lowerCamelCase_ : Dict = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCamelCase_ : List[Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
lowerCamelCase_ : Tuple = tokenizer.convert_tokens_to_string(UpperCamelCase_ )
self.assertIsInstance(output['''text'''] , UpperCamelCase_ )
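# Minimal end-to-end sketch of the tokenizer under test (illustrative; it needs the
# phonemizer backend installed, as the @require_phonemizer decorator above indicates):
def _phoneme_roundtrip_sketch():
    tokenizer = WavaVecaPhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
    ids = tokenizer(phonemes, do_phonemize=False).input_ids
    return tokenizer.decode(ids) == phonemes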
| 501
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput ( BaseOutput ):
down_block_res_samples : jnp.ndarray
mid_block_res_sample : jnp.ndarray
class FlaxControlNetConditioningEmbedding ( nn.Module ):
conditioning_embedding_channels : int
block_out_channels : Tuple[int] = (16, 32, 96, 256)
dtype : jnp.dtype = jnp.float32
def setup( self ):
self.conv_in = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks = []
for i in range(len(self.block_out_channels ) - 1 ):
channel_in = self.block_out_channels[i]
channel_out = self.block_out_channels[i + 1]
conv_a = nn.Conv(
channel_in ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(conv_a )
conv_b = nn.Conv(
channel_out ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(conv_b )
self.blocks = blocks
self.conv_out = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self ,conditioning ):
embedding = self.conv_in(conditioning )
embedding = nn.silu(embedding )
for block in self.blocks:
embedding = block(embedding )
embedding = nn.silu(embedding )
embedding = self.conv_out(embedding )
return embedding
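# Shape sketch (illustrative; assumes the default block_out_channels above): each of the
# three stride-2 convs halves the spatial size, so a 512x512 conditioning image comes out
# at the 64x64 latent resolution the UNet expects.
def _conditioning_embedding_shape_sketch():
    embedder = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=320)
    cond = jnp.zeros((1, 512, 512, 3))
    params = embedder.init(jax.random.PRNGKey(0), cond)
    out = embedder.apply(params, cond)  # expected shape: (1, 64, 64, 320)
    return out.shape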
@flax_register_to_config
class FlaxControlNetModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
sample_size : int = 32
in_channels : int = 4
down_block_types : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
only_cross_attention : Union[bool, Tuple[bool]] = False
block_out_channels : Tuple[int] = (320, 640, 1280, 1280)
layers_per_block : int = 2
attention_head_dim : Union[int, Tuple[int]] = 8
num_attention_heads : Optional[Union[int, Tuple[int]]] = None
cross_attention_dim : int = 1280
dropout : float = 0.0
use_linear_projection : bool = False
dtype : jnp.dtype = jnp.float32
flip_sin_to_cos : bool = True
freq_shift : int = 0
controlnet_conditioning_channel_order : str = "rgb"
conditioning_embedding_out_channels : Tuple[int] = (16, 32, 96, 256)
def init_weights( self ,rng :jax.random.KeyArray ) -> FrozenDict:
# init input tensors
sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
sample = jnp.zeros(sample_shape ,dtype=jnp.float32 )
timesteps = jnp.ones((1,) ,dtype=jnp.int32 )
encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.float32 )
controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
controlnet_cond = jnp.zeros(controlnet_cond_shape ,dtype=jnp.float32 )
params_rng , dropout_rng = jax.random.split(rng )
rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(rngs ,sample ,timesteps ,encoder_hidden_states ,controlnet_cond )["params"]
def setup( self ):
block_out_channels = self.block_out_channels
time_embed_dim = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = self.num_attention_heads or self.attention_head_dim
# input
self.conv_in = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
self.time_proj = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
self.time_embedding = FlaxTimestepEmbedding(time_embed_dim ,dtype=self.dtype )
self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
only_cross_attention = self.only_cross_attention
if isinstance(only_cross_attention ,bool ):
only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
if isinstance(num_attention_heads ,int ):
num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
# down
down_blocks = []
controlnet_down_blocks = []
output_channel = block_out_channels[0]
controlnet_block = nn.Conv(
output_channel ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(controlnet_block )
for i, down_block_type in enumerate(self.down_block_types ):
snake_case__ : int = output_channel
snake_case__ : str = block_out_channels[i]
snake_case__ : str = i == len(__lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case__ : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=__lowercase ,out_channels=__lowercase ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
snake_case__ : Any = FlaxDownBlockaD(
in_channels=__lowercase ,out_channels=__lowercase ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(__lowercase )
for _ in range(self.layers_per_block ):
snake_case__ : int = nn.Conv(
__lowercase ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(__lowercase )
if not is_final_block:
snake_case__ : List[str] = nn.Conv(
__lowercase ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(__lowercase )
snake_case__ : Union[str, Any] = down_blocks
snake_case__ : Dict = controlnet_down_blocks
# mid
snake_case__ : Union[str, Any] = block_out_channels[-1]
snake_case__ : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__lowercase ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
snake_case__ : Any = nn.Conv(
__lowercase ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
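# Minimal usage sketch (hypothetical shapes and calls, for illustration only — not part of this file):
#   controlnet = FlaxControlNetModel()
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   out = controlnet.apply({"params": params}, sample, timesteps, encoder_hidden_states, controlnet_cond)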
| 707
|
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
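# Worked example (approximate values, shown only for illustration): for edge = 5,
# dodecahedron_surface_area(5) ≈ 516.14 and dodecahedron_volume(5) ≈ 957.89.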
| 219
| 0
|
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    '7B': 11008,
    '13B': 13824,
    '30B': 17920,
    '65B': 22016,
    '70B': 28672,
}
NUM_SHARDS = {
    '7B': 1,
    '7Bf': 1,
    '13B': 2,
    '13Bf': 2,
    '30B': 4,
    '65B': 8,
    '70B': 8,
    '70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
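# Worked example: for the 7B checkpoint (dim = 4096), int(8 * 4096 / 3) = 10922, which is
# rounded up to the next multiple of 256, so compute_intermediate_size(4096) == 11008 —
# matching the 7B entry in INTERMEDIATE_SIZE_MAP above.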
def read_json(path):
    with open(path, 'r') as f:
        return json.load(f)


def write_json(text, path):
    with open(path, 'w') as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, 'tmp')
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, 'params.json'))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params['n_layers']
    n_heads = params['n_heads']
    n_heads_per_shard = n_heads // num_shards
    dim = params['dim']
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params['n_kv_heads']  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''')
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, 'consolidated.00.pth'), map_location='cpu')
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, F'''consolidated.{i:02d}.pth'''), map_location='cpu')
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {'weight_map': {}}
    for layer_i in range(n_layers):
        filename = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
        if model_size == "7B":
            # Unsharded
            state_dict = {
                F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
                    loaded[F'''layers.{layer_i}.attention.wq.weight''']),
                F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
                    loaded[F'''layers.{layer_i}.attention.wk.weight''']),
                F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''],
                F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''],
                F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''],
                F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''],
                F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''],
                F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''],
                F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
                    F'''layers.{layer_i}.attention_norm.weight'''
                ].clone(),
                F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
                    F'''layers.{layer_i}.ffn_norm.weight'''
                ].clone(),
            }
            state_dict[F'''model.layers.{layer_i}.self_attn.q_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(dim, dim))
            state_dict[F'''model.layers.{layer_i}.self_attn.k_proj.weight'''] = permute(
                torch.cat(
                    [
                        loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view(
                            num_local_key_value_heads, dims_per_head, dim)
                        for i in range(num_shards)
                    ], dim=0, ).reshape(key_value_dim, dim), num_key_value_heads, key_value_dim, dim, )
            state_dict[F'''model.layers.{layer_i}.self_attn.v_proj.weight'''] = torch.cat(
                [
                    loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view(
                        num_local_key_value_heads, dims_per_head, dim)
                    for i in range(num_shards)
                ], dim=0, ).reshape(key_value_dim, dim)
            state_dict[F'''model.layers.{layer_i}.self_attn.o_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(num_shards)], dim=1)
            state_dict[F'''model.layers.{layer_i}.mlp.gate_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(num_shards)], dim=0)
            state_dict[F'''model.layers.{layer_i}.mlp.down_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(num_shards)], dim=1)
            state_dict[F'''model.layers.{layer_i}.mlp.up_proj.weight'''] = torch.cat(
                [loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(num_shards)], dim=0)

        state_dict[F'''model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'''] = inv_freq
        for k, v in state_dict.items():
            index_dict['weight_map'][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
    if model_size == "7B":
        # Unsharded
        state_dict = {
            'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
            'model.norm.weight': loaded['norm.weight'],
            'lm_head.weight': loaded['output.weight'],
        }
    else:
        state_dict = {
            'model.norm.weight': loaded[0]['norm.weight'],
            'model.embed_tokens.weight': torch.cat(
                [loaded[i]['tok_embeddings.weight'] for i in range(num_shards)], dim=1),
            'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict['weight_map'][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict['metadata'] = {'total_size': param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, 'pytorch_model.bin.index.json'))
    ffn_dim_multiplier = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
    multiple_of = params['multiple_of'] if 'multiple_of' in params else 256
    config = LlamaConfig(
        hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params['n_heads'], num_hidden_layers=params['n_layers'], rms_norm_eps=params['norm_eps'], num_key_value_heads=num_key_value_heads, )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print('Loading the checkpoint in a Llama model.')
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print('Saving in the Transformers format.')
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''')
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input_dir', help='Location of LLaMA weights, which contains tokenizer.model and model folders', )
    parser.add_argument(
        '--model_size', choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'], )
    parser.add_argument(
        '--output_dir', help='Location to write HF model and tokenizer', )
    parser.add_argument('--safe_serialization', type=bool, help='Whether or not to save using `safetensors`.')
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size), model_size=args.model_size, safe_serialization=args.safe_serialization, )
    spm_path = os.path.join(args.input_dir, 'tokenizer.model')
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
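# Example invocation (the script name and paths are placeholders, for illustration only):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/llama --model_size 7B --output_dir /path/to/output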
| 101
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')
    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')
    return key
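# Illustrative rename (hypothetical key, shown only to demonstrate the mapping):
#   "vqvae.level_blocks.0.k" -> "vqvae.level_blocks.0.codebook"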
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')

    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if F'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(F'failed converting {original_key} to {key}, does not match')
        # handle mismatched shape
        elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
            val = model_state_dict[F'{key_prefix}.{key}']
            print(F'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}'):
            r = requests.get(F'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(F'{pytorch_dump_folder_path}/', exist_ok=True)
            open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}', 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}')['model']

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else F'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(F'{pytorch_dump_folder_path}/mapping.json', 'w') as txtfile:
        json.dump(mapping, txtfile)
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
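# Example invocation (the script name is a placeholder; defaults above are used when flags are omitted):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics --pytorch_dump_folder_path ./jukebox-converted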
| 105
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("""/""")
    target_model_path = args.target_model_path

    print(F'''Load fine-pruned model from {model_name_or_path}''')
    model = torch.load(os.path.join(model_name_or_path, """pytorch_model.bin"""))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F'''Copied layer {name}''')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(F'''Pruned layer {name}''')
            else:
                raise ValueError("""Unknown pruning method""")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), F'''bertarized_{os.path.basename(model_name_or_path)}''')

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(F'''\nCreated folder {target_model_path}''')

    torch.save(pruned_model, os.path.join(target_model_path, """pytorch_model.bin"""))
    print("""\nPruned model saved! See you later!""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
args = parser.parse_args()
main(args)
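# Example invocation (the script name and model path are placeholders, for illustration only):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path /path/to/fine-pruned-model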
| 99
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start = a
    end = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("""could not find root in given interval.""")
    else:
        mid = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision reaches 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
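# For f(x) = x**3 - 2*x - 5, the search above converges to the classic root x ≈ 2.0945515.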
| 99
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""mgp-str""": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer backed by a simple JSON vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
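# Usage sketch (assumes a local vocab.json; illustrative only):
#   tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#   tokenizer._tokenize("abc")  # -> ["a", "b", "c"], since tokenization is character-level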
| 630
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = """dpr"""

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", projection_dim = 0, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
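# Usage sketch: DPRConfig() reproduces the BERT-base geometry above; individual fields can be
# overridden, e.g. DPRConfig(projection_dim=128).projection_dim == 128 (illustrative values).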
| 630
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("""adaptor.""")[-1]
    items = name.split(""".""")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.')
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.')
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.')
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""")[:-1]),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        }, )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}')
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}')

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = """mbart50"""
    config["feature_extractor_type"] = """wav2vec2"""
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
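# Example invocation (the script name and all paths are placeholders, for illustration only):
#   python convert_mbart_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt --dict_path /path/to/dict \
#       --config_yaml_path /path/to/config.yaml --pytorch_dump_folder_path ./converted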
| 701
|
"""simple docstring"""
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return the given decimal as a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("""Please enter a valid number""")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(""".""")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce by the greatest common divisor, computed with Euclid's algorithm
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
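# Worked examples: decimal_to_fraction(1.5) == (3, 2) and decimal_to_fraction("6.25") == (25, 4).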
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
| 255
| 0
|
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""")
if __name__ == "__main__":
main()
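# This script is meant to be launched once per process by torch.distributed, e.g. (illustrative):
#   torchrun --nproc_per_node=2 this_script.py --streaming True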
| 595
|
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    '''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''', BERT_START_DOCSTRING, )
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    '''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. ''', BERT_START_DOCSTRING, )
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
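# Usage sketch (illustrative): with a trained checkpoint loaded into
# BertForSequenceClassificationWithPabee, calling model.bert.set_patience(3) makes inference
# exit early once three consecutive classifier heads agree, and model.bert.log_stats()
# then reports the measured speed-up over running all layers.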
| 595
| 1
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def __A ( a_ : str ,a_ : str = "cpu" ,a_ : Union[str, None] = None ):
lowerCAmelCase : Optional[int] = torch.load(a_ ,map_location=a_ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(a_ ,torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
lowerCAmelCase : str = v.half()
if save_path is None: # overwrite src_path
lowerCAmelCase : Tuple = src_path
torch.save(a_ ,a_ )
if __name__ == "__main__":
fire.Fire(convert)
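# Example invocation via fire (script name and path are placeholders, for illustration only):
#   python convert_model_to_fp16.py /path/to/pytorch_model.bin --map_location cpu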
| 551
|
'''simple docstring'''
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 551
| 1
|