| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 295 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 141 | 0 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It prints the report twice: once in GitHub format, with all the information that needs to be
# shared with others, and a second time in a console-friendly format that is easier to use when tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the
# trainer 6 times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
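#
# The expansion itself is just a cartesian product over the split dimensions, as a minimal
# sketch (variable names here are illustrative, not part of this script's API):
#
#     import itertools
#     dims = [d.split("|") for d in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
#     for combo in itertools.product(*dims):
#         print(" ".join(combo).strip())  # prints the 6 variations listed above
#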
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
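#
# For reference, an all_results.json produced by the HF Trainer contains flat key/value metrics;
# an illustrative (not verbatim) entry, with values borrowed from the sample table above, might be:
#
#     {"train_samples_per_second": 285.11, "train_loss": 2.51, ...}
#
# Any of these keys can be passed as --target-metric-key or listed in --report-metric-keys.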
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the next block to debug this function without running the actual benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create the diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. If None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument, e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
| 357 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 125 | 0 |
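# Counts the integer partitions of m with a bottom-up DP table where memo[n][k] is the number of
# partitions of n into parts of size at most k + 1: the recurrence adds the partitions that avoid
# part k + 1 (memo[n][k - 1]) to those that use it at least once (memo[n - k - 1][k]).
# Sanity check: partition(4) == 5 and partition(5) == 7, matching the partition function p(n).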
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 149 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 149 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """Mark the function with the key code so it can be handled in the register"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """Mark the function with the key codes so it can be handled in the register"""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """Metaclass that adds the key handlers to the class"""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists"""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to the key handler register"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 158 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just outputs the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
| 158 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 349 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 349 | 1 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # close the issue since it has been 7 days of inactivity since bot mention
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # add a stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 121 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 121 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder: bool = True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
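# A plain-Python sketch (no transformers dependency) of the feature-size arithmetic the
# config above performs; the values are the documented defaults, used purely for illustration.
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
embedding_dimension = [0]  # [min(50, (cat + 1) // 2) for cat in [0]], the default cardinality
num_dynamic_real_features = num_time_features = num_static_real_features = 0

number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2  # the log1p(abs(loc)) and log(scale) features
)
feature_size = input_size * len(lags_sequence) + number_of_features
assert feature_size == 9  # 1 value * 7 lagged copies + 2 scaling features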
| 49
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_zero=False, )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'inverse_scheduler': inverse_scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0 ):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'a dog and a newt',
            'mask_image': mask,
            'image_latents': latents,
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0 ):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'source_prompt': 'a cat and a frog',
            'target_prompt': 'a dog and a newt',
            'generator': generator,
            'num_inference_steps': 2,
            'num_maps_per_mask': 2,
            'mask_encode_strength': 1.0,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0 ):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0, 2, 3, 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert('RGB' )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': image,
            'prompt': 'a cat and a frog',
            'generator': generator,
            'num_inference_steps': 2,
            'inpaint_strength': 1.0,
            'guidance_scale': 6.0,
            'decode_latents': True,
            'output_type': 'numpy',
        }
        return inputs
    def test_save_load_optional_components(self ):
        if not hasattr(self.pipeline_class, '_optional_components' ):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None )
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component ) is None, f'''`{optional_component}` did not stay set to None after loading.''', )
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(output - output_loaded ).max()
        self.assertLess(max_diff, 1e-4 )

    def test_mask(self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_mask_inputs(device )
        mask = pipe.generate_mask(**inputs )
        mask_slice = mask[0, -3:, -3:]
        self.assertEqual(mask.shape, (1, 16, 16) )
        expected_slice = np.array([0] * 9 )
        max_diff = np.abs(mask_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
        self.assertEqual(mask[0, -3, -4], 0 )

    def test_inversion(self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )

    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3 )

    def test_inversion_dpm(self ):
        device = 'cpu'
        components = self.get_dummy_components()
        scheduler_args = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
        components['scheduler'] = DPMSolverMultistepScheduler(**scheduler_args )
        components['inverse_scheduler'] = DPMSolverMultistepInverseScheduler(**scheduler_args )
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inversion_inputs(device )
        image = pipe.invert(**inputs ).images
        image_slice = image[0, -1, -3:, -3:]
        self.assertEqual(image.shape, (2, 32, 32, 3) )
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799], )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff, 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase ):
    def tearDown(self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls ):
        raw_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
        raw_image = raw_image.convert('RGB' ).resize((768, 768) )
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self ):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, output_type='numpy', ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self ):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=None, torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = 'a bowl of fruit'
        target_prompt = 'a bowl of pears'
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
        expected_image = (
            np.array(
                load_image(
                    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
                    '/diffedit/pears.png' ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 255
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U] ):
    """simple docstring"""

    def __init__(self, key, val ):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self ):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
        )


class DoubleLinkedList(Generic[T, U] ):
    """simple docstring"""

    def __init__(self ):
        self.head = DoubleLinkedListNode(None, None )
        self.rear = DoubleLinkedListNode(None, None )
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self ):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node ) )
            node = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(rep )

    def add(self, node ):
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node ):
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U] ):
    """simple docstring"""

    decorator_function_to_instance_map: dict = {}

    def __init__(self, capacity ):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self ):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key ):
        return key in self.cache

    def get(self, key ):
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node )
            return node.val
        self.miss += 1
        return None

    def put(self, key, value ):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node ) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node )

    @classmethod
    def decorator(cls, size=128 ):
        def cache_decorator_inner(func ) -> Callable[..., U]:
            def cache_decorator_wrapper(*args ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size )
                result = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    result = func(*args )
                    cls.decorator_function_to_instance_map[func].put(args[0], result )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info )  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
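# Usage sketch for the LRU cache above; the expected values follow directly from the
# capacity-2 eviction order and are easy to verify by hand.
if __name__ == "__main__":
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1  # key 1 becomes most recently used
    cache.put(3, 3)           # evicts key 2, the least recently used entry
    assert cache.get(2) is None
    assert cache.get(3) == 3

    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(30))  # 832040, computed once per argument thanks to memoization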
| 178
|
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    '''simple docstring'''
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
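# Worked example for the routine above: after the in-place DP, each cell holds the
# cheapest cost of reaching it moving only right or down, so the bottom-right cell is
# the answer. The grid values are illustrative.
if __name__ == "__main__":
    grid = [
        [1, 3, 1],
        [1, 5, 1],
        [4, 2, 1],
    ]
    assert min_path_sum(grid) == 7  # optimal path: 1 -> 3 -> 1 -> 1 -> 1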
| 178
| 1
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs ):
        requires_backends(self, ["onnx"] )

    @classmethod
    def from_config(cls, *args, **kwargs ):
        requires_backends(cls, ["onnx"] )

    @classmethod
    def from_pretrained(cls, *args, **kwargs ):
        requires_backends(cls, ["onnx"] )
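# A minimal standalone sketch of the dummy-object pattern used above: when an optional
# backend (here "onnx") is missing, a placeholder class raises a helpful ImportError at
# instantiation time instead of breaking the package import. Names are illustrative.
class _RequiresOnnx:
    def __init__(self, *args, **kwargs):
        raise ImportError("This class requires the `onnx` backend: pip install onnx")


try:
    _RequiresOnnx()
except ImportError as err:
    print(err)  # This class requires the `onnx` backend: pip install onnx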
| 48
|
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path: str):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
)
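# Example invocation of the converter above; the script name and both paths are
# hypothetical placeholders, not real checkpoint locations:
#
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --use_large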
| 125
| 0
|
"""simple docstring"""
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
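# Sanity checks for solution() above (Project Euler 25): the first Fibonacci number
# with 3 digits is F(12) = 144, and the published answer for 1000 digits is index 4782.
if __name__ == "__main__":
    assert solution(3) == 12
    assert solution(1000) == 4782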
| 367
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    '''simple docstring'''
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
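# Worked example for the ideal gas law helpers above (PV = nRT): 2 mol at 100 K in a
# 5 m^3 vessel gives P = nRT / V = 2 * 100 * 8.314462 / 5 = 332.57848 Pa.
if __name__ == "__main__":
    print(pressure_of_gas_system(2, 100, 5))     # 332.57848
    print(volume_of_gas_system(3, 300, 90_000))  # ~0.0831 m^3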
| 79
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase ):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self ):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase ):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )

    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, "clusters" ) )
        self.assertTrue(hasattr(image_processor, "do_resize" ) )
        self.assertTrue(hasattr(image_processor, "size" ) )
        self.assertTrue(hasattr(image_processor, "do_normalize" ) )

    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )

    def test_image_processor_to_json_string(self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key] ) )
            else:
                self.assertEqual(obj[key], value )

    def test_image_processor_to_json_file(self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key], value )

    def test_image_processor_from_and_save_pretrained(self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key], value )
    @unittest.skip("ImageGPT requires clusters at initialization" )
    def test_init_without_params(self ):
        pass
def prepare_images():
    '''simple docstring'''
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test" )
    imagea = Image.open(dataset[4]["file"] )
    imageb = Image.open(dataset[5]["file"] )
    images = [imagea, imageb]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorTest(unittest.TestCase ):
    @slow
    def test_image(self ):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids, torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape, (1, 1024) )
        expected_output = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_output )

        # test batched
        encoding = image_processing(images, return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids, torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape, (2, 1024) )
        expected_output = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_output )
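# A small sketch of what the "clusters" in the image processor above are for: ImageGPT
# quantizes each normalized pixel to the index of its nearest color cluster, turning an
# image into a sequence of integer tokens. The two clusters here mirror the dummy values
# used by the tester and are illustrative only.
import numpy as np

clusters = np.asarray(
    [
        [0.8866, 0.6618, 0.3891],
        [-0.6042, -0.0229, 0.5423],
    ]
)
pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])  # two normalized RGB pixels
# squared distance from every pixel to every cluster, then argmin over clusters
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
tokens = distances.argmin(axis=1)
print(tokens)  # [0 1]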
| 158
|
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    '''simple docstring'''
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
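# Sanity checks for the numerical gamma function above: gamma(n) = (n - 1)!, so
# gamma(5) = 24, and gamma(1/2) = sqrt(pi).
if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-6
    assert abs(gamma(0.5) ** 2 - math.pi) < 1e-6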
| 158
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_resnet'] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_resnet'] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_resnet'] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
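# A minimal standalone sketch of the lazy-import pattern the module above relies on:
# attribute access triggers the real import, so `import package` stays cheap. This is an
# illustration of the idea, not the actual transformers._LazyModule implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        try:
            submodule = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}") from None
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)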
| 353
|
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
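# Worked example for find_minimum_change above (greedy change-making; the standard
# Indian denominations form a canonical coin system, so the greedy choice is optimal here):
#
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]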
| 309
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple=1_0_0 , lowerCAmelCase_ : Tuple=1_3 , lowerCAmelCase_ : Tuple=3_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=3_2 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Tuple="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=1_0 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : int=[0, 1, 2, 3] , ):
"""simple docstring"""
_A: str = parent
_A: str = 1_0_0
_A: Optional[int] = batch_size
_A: Optional[int] = image_size
_A: Tuple = patch_size
_A: Union[str, Any] = num_channels
_A: int = is_training
_A: Union[str, Any] = use_labels
_A: List[str] = hidden_size
_A: Optional[Any] = num_hidden_layers
_A: Any = num_attention_heads
_A: Dict = intermediate_size
_A: Union[str, Any] = hidden_act
_A: List[str] = hidden_dropout_prob
_A: Tuple = attention_probs_dropout_prob
_A: Union[str, Any] = type_sequence_label_size
_A: int = initializer_range
_A: Optional[int] = scope
_A: Optional[Any] = out_indices
_A: Optional[Any] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A: List[str] = (image_size // patch_size) ** 2
_A: Tuple = num_patches + 1
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A: Optional[Any] = None
_A: int = None
if self.use_labels:
_A: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A: Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_A: List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Dict ):
"""simple docstring"""
_A: Optional[Any] = BeitModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Dict = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Optional[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: str = BeitForMaskedImageModeling(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Dict = model(lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ):
"""simple docstring"""
_A: Tuple = self.type_sequence_label_size
_A: str = BeitForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: List[Any] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A: Optional[Any] = 1
_A: List[str] = BeitForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A: Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: List[str] = self.num_labels
_A: str = BeitForSemanticSegmentation(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A: Tuple = model(lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
_A: Tuple = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __magic_name__ ( self : Tuple ):
"""simple docstring"""
_A: str = self.prepare_config_and_inputs()
_A , _A , _A , _A: Optional[Any] = config_and_inputs
_A: Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self ):
"""simple docstring"""
self.model_tester = BeitModelTester(self )
self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def __magic_name__ ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __magic_name__ ( self : str ):
"""simple docstring"""
_A , _A: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Optional[int] = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A: Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
_A , _A: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A: Optional[int] = model_class(lowerCAmelCase_ )
_A: Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A: int = [*signature.parameters.keys()]
_A: Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase_ )
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
_A , _A: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_A: str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
continue
_A: str = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.train()
_A: Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A: str = model(**lowerCAmelCase_ ).loss
loss.backward()
def __magic_name__ ( self : str ):
"""simple docstring"""
_A , _A: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_A: List[Any] = False
_A: int = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_A: str = model_class(lowerCAmelCase_ )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase_ )
model.train()
_A: Optional[int] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ )
_A: Union[str, Any] = model(**lowerCAmelCase_ ).loss
loss.backward()
def __magic_name__ ( self : Dict ):
"""simple docstring"""
_A , _A: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A: int = _config_zero_init(lowerCAmelCase_ )
for model_class in self.all_model_classes:
_A: Any = model_class(config=lowerCAmelCase_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __magic_name__ ( self : List[Any] ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A: Optional[Any] = BeitModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img():
image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __magic_name__ ( self : Union[str, Any] ):
"""simple docstring"""
_A: int = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(lowerCAmelCase_ )
_A: Optional[Any] = self.default_image_processor
_A: Optional[Any] = prepare_img()
_A: str = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values.to(lowerCAmelCase_ )
# prepare bool_masked_pos
_A: List[str] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A: Union[str, Any] = model(pixel_values=lowerCAmelCase_ , bool_masked_pos=lowerCAmelCase_ )
_A: Optional[Any] = outputs.logits
# verify the logits
_A: Any = torch.Size((1, 1_9_6, 8_1_9_2) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
_A: str = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCAmelCase_ , atol=1e-2 ) )
@slow
def __magic_name__ ( self : Optional[int] ):
"""simple docstring"""
_A: List[str] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(lowerCAmelCase_ )
_A: Dict = self.default_image_processor
_A: Optional[Any] = prepare_img()
_A: Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A: List[Any] = model(**lowerCAmelCase_ )
_A: Dict = outputs.logits
# verify the logits
_A: List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
_A: Optional[int] = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
_A: Optional[Any] = 2_8_1
self.assertEqual(logits.argmax(-1 ).item() , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : List[str] ):
"""simple docstring"""
_A: Any = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
lowerCAmelCase_ )
_A: List[Any] = self.default_image_processor
_A: Tuple = prepare_img()
_A: Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A: Union[str, Any] = model(**lowerCAmelCase_ )
_A: List[str] = outputs.logits
# verify the logits
_A: Optional[int] = torch.Size((1, 2_1_8_4_1) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
_A: Optional[int] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
_A: str = 2_3_9_6
self.assertEqual(logits.argmax(-1 ).item() , lowerCAmelCase_ )
@slow
def __magic_name__ ( self : Any ):
"""simple docstring"""
_A: Any = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_A: Tuple = model.to(lowerCAmelCase_ )
_A: List[str] = BeitImageProcessor(do_resize=lowerCAmelCase_ , size=6_4_0 , do_center_crop=lowerCAmelCase_ )
_A: Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_A: str = Image.open(ds[0]['''file'''] )
_A: Optional[int] = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A: Tuple = model(**lowerCAmelCase_ )
_A: Optional[Any] = outputs.logits
# verify the logits
_A: Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) )
self.assertEqual(logits.shape , lowerCAmelCase_ )
_A: Union[str, Any] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
_A: List[Any] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCAmelCase_ , )
else:
_A: Tuple = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Optional[Any] ):
"""simple docstring"""
_A: int = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
_A: str = model.to(lowerCAmelCase_ )
_A: List[str] = BeitImageProcessor(do_resize=lowerCAmelCase_ , size=6_4_0 , do_center_crop=lowerCAmelCase_ )
_A: List[str] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
_A: Tuple = Image.open(ds[0]['''file'''] )
_A: List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_A: Union[str, Any] = model(**lowerCAmelCase_ )
_A: int = outputs.logits.detach().cpu()
_A: Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ , target_sizes=[(5_0_0, 3_0_0)] )
_A: List[Any] = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
_A: Any = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase_ )
_A: int = torch.Size((1_6_0, 1_6_0) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase_ )
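# Quick check of the sequence-length arithmetic the tester above relies on: with a
# 30x30 image and 2x2 patches, BEiT sees (30 // 2) ** 2 = 225 patches plus one [CLS]
# token, i.e. a sequence length of 226. Plain-Python sanity check:
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert num_patches + 1 == 226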
| 121
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path: str , pytorch_dump_folder_path: str ):
if "xprophetnet" in prophetnet_checkpoint_path:
prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
prophetnet_checkpoint_path , output_loading_info=True )
else:
prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
prophetnet_checkpoint_path , output_loading_info=True )
special_keys = ['''key_proj''', '''value_proj''', '''query_proj''']
mapping = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
attributes = key.split('''.''' )
if attributes[0] == "lm_head":
model = prophet
old_model = prophet_old
else:
model = prophet.prophetnet
old_model = prophet_old.model
is_key_init = False
for attribute in attributes:
if attribute in mapping:
_A: Optional[int] = mapping[attribute]
if not hasattr(a , a ) and len(a ) > 0:
_A: int = attribute
elif hasattr(a , a ):
_A: Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_A: Union[str, Any] = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
_A: Any = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_A: str = old_model.bias
logger.info(f"""{attribute} is initialized""" )
_A: Dict = True
break
elif attribute in special_keys and hasattr(a , '''in_proj_weight''' ):
embed_dim = old_model.in_proj_weight.shape[0] // 3
param = getattr(model , attribute )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
is_key_init = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_A: Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_A: List[Any] = True
break
if attribute.isdigit():
_A: Tuple = model[int(a )]
_A: int = old_model[int(a )]
else:
_A: Union[str, Any] = getattr(a , a )
if old_attribute == "":
_A: Union[str, Any] = old_model
else:
if not hasattr(a , a ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
_A: List[Any] = getattr(a , a )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
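

def _demo_split_in_proj():
    """Illustration only (not part of the original converter): slice a fused
    attention in_proj_weight of shape (3 * embed_dim, embed_dim) into its
    query/key/value blocks, mirroring the special-key handling above.
    All sizes here are arbitrary toy values."""
    import torch

    embed_dim = 4
    in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
    q_w = in_proj_weight[:embed_dim, :]  # first block  -> query_proj
    k_w = in_proj_weight[embed_dim : 2 * embed_dim, :]  # middle block -> key_proj
    v_w = in_proj_weight[2 * embed_dim :, :]  # last block   -> value_proj
    assert q_w.shape == k_w.shape == v_w.shape == (embed_dim, embed_dim)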
| 121
| 1
|
'''simple docstring'''
def one_pence() -> int:
    """There is exactly one way to make any non-negative amount from 1p coins."""
    return 1


def two_pence(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to £1."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Number of ways to make ``x`` pence from coins up to £2."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Project Euler 31: count the ways to make ``x`` pence from UK coins."""
    return two_pound(x)
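

# Cross-check sketch (added illustration, not part of the original solution):
# an iterative DP over the same UK coin set. Both approaches should report
# 73682 ways for the default 200p target.
def coin_sums_dp(target: int = 200) -> int:
    coins = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * target  # ways[0] = 1: one way to make nothing
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]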
if __name__ == "__main__":
print(solution(int(input().strip())))
| 365
|
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
def __init__( self, A, A=2, A=3, A=4, A=2, A=7, A=True, A=True, A=True, A=True, A=99, A=36, A=3, A=4, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=6, A=6, A=3, A=4, A=None, A=1_000, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = image_size
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : List[Any] = text_seq_length
SCREAMING_SNAKE_CASE : Tuple = is_training
SCREAMING_SNAKE_CASE : Tuple = use_input_mask
SCREAMING_SNAKE_CASE : Optional[int] = use_token_type_ids
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : str = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE : int = num_choices
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : Optional[int] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE : Union[str, Any] = text_seq_length
SCREAMING_SNAKE_CASE : List[str] = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE : int = self.text_seq_length + self.image_seq_length
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE : str = bbox[i, j, 3]
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE : Tuple = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE : Optional[Any] = bbox[i, j, 2]
SCREAMING_SNAKE_CASE : Any = bbox[i, j, 0]
SCREAMING_SNAKE_CASE : Tuple = t
SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
SCREAMING_SNAKE_CASE : int = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
SCREAMING_SNAKE_CASE : Optional[int] = model(A, pixel_values=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : List[str] = model(A, bbox=A, pixel_values=A, token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[int] = model(A, bbox=A, pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE : List[Any] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE : List[str] = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE : List[str] = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : str = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, labels=A, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(
A, bbox=A, pixel_values=A, attention_mask=A, token_type_ids=A, start_positions=A, end_positions=A, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def UpperCamelCase_ ( self, A, A, A, A, A ):
'''simple docstring'''
return True
    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
def UpperCamelCase_ ( self, A, A, A=False ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = copy.deepcopy(A )
if model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Optional[int] = {
k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous()
if isinstance(A, torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in get_values(A ):
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
SCREAMING_SNAKE_CASE : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in [
*get_values(A ),
]:
SCREAMING_SNAKE_CASE : str = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=A )
elif model_class in [
*get_values(A ),
]:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=A, )
return inputs_dict
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : List[str] = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Optional[Any] = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    """Load the COCO fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
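        # Shape note for the test above: LayoutLMv3-base uses 224x224 images
        # with 16x16 patches, so the sequence is 2 text tokens +
        # (224 / 16) ** 2 = 196 image patches + 1 CLS patch token = 199
        # positions, matching the expected (1, 199, 768) output.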
| 246
| 0
|
from manim import *
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Tuple:
snake_case_ = Rectangle(height=0.5 , width=0.5 )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case_ = Rectangle(height=0.25 , width=0.25 )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*a ).arrange(a , buff=0 )
snake_case_ = VGroup(*a ).arrange(a , buff=0 )
snake_case_ = VGroup(a , a ).arrange(a , buff=0 )
snake_case_ = Text('CPU' , font_size=24 )
snake_case_ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a )
snake_case_ = [mem.copy() for i in range(4 )]
snake_case_ = VGroup(*a ).arrange(a , buff=0 )
snake_case_ = Text('GPU' , font_size=24 )
snake_case_ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
gpu.move_to([-1, -1, 0] )
self.add(a )
snake_case_ = [mem.copy() for i in range(6 )]
snake_case_ = VGroup(*a ).arrange(a , buff=0 )
snake_case_ = Text('Model' , font_size=24 )
snake_case_ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
model.move_to([3, -1.0, 0] )
self.add(a )
snake_case_ = []
snake_case_ = []
for i, rect in enumerate(a ):
snake_case_ = fill.copy().set_fill(a , opacity=0.8 )
target.move_to(a )
model_arr.append(a )
snake_case_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(a , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a )
self.add(*a , *a )
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = [meta_mem.copy() for i in range(6 )]
snake_case_ = VGroup(*a ).arrange(a , buff=0 )
snake_case_ = VGroup(*a ).arrange(a , buff=0 )
snake_case_ = VGroup(a , a ).arrange(a , buff=0 )
snake_case_ = Text('Disk' , font_size=24 )
snake_case_ = Group(a , a ).arrange(a , buff=0.5 , aligned_edge=a )
disk.move_to([-4, -1.25, 0] )
self.add(a , a )
snake_case_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case_ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a , a )
snake_case_ = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a )
snake_case_ = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a ) )
snake_case_ = Square(0.3 )
input.set_fill(a , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a , buff=0.5 )
self.play(Write(a ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a , buff=0.02 )
self.play(MoveToTarget(a ) )
self.play(FadeOut(a ) )
snake_case_ = Arrow(start=a , end=a , color=a , buff=0.5 )
a.next_to(model_arr[0].get_left() , a , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
snake_case_ = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) )
snake_case_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(a ) , Circumscribe(model_arr[0] , color=a , **a ) , Circumscribe(model_cpu_arr[0] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
snake_case_ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , a , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
snake_case_ = AnimationGroup(
FadeOut(a , run_time=0.5 ) , MoveToTarget(a , run_time=0.5 ) , FadeIn(a , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
snake_case_ = 0.7
self.play(
Circumscribe(model_arr[i] , **a ) , Circumscribe(cpu_left_col_base[i] , **a ) , Circumscribe(cpu_left_col_base[i + 1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , Circumscribe(model_arr[i + 1] , color=a , **a ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a , **a ) , Circumscribe(cpu_left_col_base[-1] , color=a , **a ) , Circumscribe(gpu_rect[0] , color=a , **a ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
snake_case_ = a_c
snake_case_ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(a ) , FadeOut(a , run_time=0.5 ) , )
snake_case_ = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a , run_time=3 ) , MoveToTarget(a ) )
self.wait()
| 178
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 178
| 1
|
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (1-indexed)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
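

# Quick self-check (added illustration): Sylvester's sequence obeys
# s(n) = s(n-1)^2 - s(n-1) + 1, giving 2, 3, 7, 43, 1807, ...
def _check_first_terms() -> None:
    assert [sylvester(i) for i in range(1, 6)] == [2, 3, 7, 43, 1807]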
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 361
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a peaking EQ biquad filter with the given gain in decibels."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a low-shelf biquad filter with the given gain in decibels."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a high-shelf biquad filter with the given gain in decibels."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
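

# Usage sketch (added illustration; assumes the IIRFilter.process() API from
# audio_filters.iir_filter): run a unit impulse through a 1 kHz low-pass
# biquad at a 48 kHz sample rate to inspect its impulse response.
def _demo_lowpass_impulse_response(n_samples: int = 8):
    lp = make_lowpass(1000, 48000)
    return [lp.process(1.0 if i == 0 else 0.0) for i in range(n_samples)]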
| 278
| 0
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
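

# Usage sketch (added illustration; the module is deprecated upstream, shown
# only to demonstrate next_batch's epoch handling). "/tmp/mnist" is a
# hypothetical cache directory.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist", one_hot=True)
    batch_images, batch_labels = mnist.train.next_batch(64)
    print(batch_images.shape, batch_labels.shape)  # expected: (64, 784) (64, 10)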
| 82
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
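

# Validation sketch (added illustration): a well-formed `rope_scaling` dict
# passes, while a malformed one raises ValueError. Values are arbitrary examples.
def _demo_rope_scaling_validation() -> None:
    GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 2.0})  # accepted
    try:
        GPTNeoXConfig(rope_scaling={"type": "linear"})  # missing `factor`
    except ValueError as err:
        print(err)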
| 79
| 0
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Difference between the sum of the cubes and the sum of the squares of 1..n."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
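

# Brute-force cross-check (added illustration) of the closed forms above:
# the sum of cubes is (n(n+1)/2)^2 and the sum of squares is n(n+1)(2n+1)/6.
def _solution_brute_force(n: int = 100) -> int:
    return sum(i**3 for i in range(1, n + 1)) - sum(i**2 for i in range(1, n + 1))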
if __name__ == "__main__":
print(f"""{solution() = }""")
| 356
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_nllb_moe import (
        NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
        NllbMoeForConditionalGeneration,
        NllbMoeModel,
        NllbMoePreTrainedModel,
        NllbMoeSparseMLP,
        NllbMoeTop2Router,
    )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69
| 0
|
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 61
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 309
| 0
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
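    # Worked example (added illustration): for [1, 2, 4, 5] the best
    # non-adjacent picks are 2 and 5, so the maximum sum is 7.
    assert maximum_non_adjacent_sum([1, 2, 4, 5]) == 7
    assert maximum_non_adjacent_sum([]) == 0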
| 247
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """Log commit info."""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """Handle single- and multi-GPU / multi-node setups."""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    prefix = f"--- Global rank: {params.global_rank} - "
    logger.info(prefix + "Number of nodes: %i" % params.n_nodes)
    logger.info(prefix + "Node ID        : %i" % params.node_id)
    logger.info(prefix + "Local rank     : %i" % params.local_rank)
    logger.info(prefix + "World size     : %i" % params.world_size)
    logger.info(prefix + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(prefix + "Master         : %s" % str(params.is_master))
    logger.info(prefix + "Multi-node     : %s" % str(params.multi_node))
    logger.info(prefix + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(prefix + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """Set the seed for reproducibility."""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
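

# Usage sketch (added illustration): init_gpu_params expects these
# launcher-provided environment variables in multi-GPU mode (values below are
# hypothetical):
#   WORLD_SIZE=8 N_GPU_NODE=4 N_NODES=2 RANK=0 NODE_RANK=0
# set_seed only needs `seed` / `n_gpu` attributes on the args namespace:
def _demo_set_seed() -> None:
    import argparse

    set_seed(argparse.Namespace(seed=42, n_gpu=0))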
| 247
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """Convert a GPTSAN TensorFlow checkpoint into a PyTorch state dict."""
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
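
# Layout note on the conversion above: TF stores Dense kernels as
# (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features), hence the recurring
# vnp.transpose([1, 0]).copy() before wrapping with torch.tensor(...).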
| 75
|
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit divisible by ``divisor`` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1000000) -> int:
    """Project Euler 129: smallest n for which A(n) first exceeds ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
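

# Spot check (added illustration): A(7) = 6, since the six-digit repunit
# 111111 = 7 * 15873 is the smallest repunit divisible by 7.
def _check_a_of_seven() -> None:
    assert least_divisible_repunit(7) == 6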
if __name__ == "__main__":
print(F'''{solution() = }''')
| 246
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
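
# Note on the pattern above: `import transformers.models.x_clip` stays cheap
# because _LazyModule defers the torch-dependent submodule imports until an
# attribute such as XCLIPModel is first accessed on the proxy module.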
| 352
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
a__ : int = TypeVar('T')
class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
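# Usage example: chainable edge insertion on a small directed graph.
if __name__ == "__main__":
    graph = GraphAdjacencyList[int]()
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)
    print(graph)  # {0: [1], 1: [2], 2: [0]}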
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
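# Hypothetical usage sketch (assumes the allenai checkpoint files listed above
# are downloadable):
#
#     tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     tok.tokenize("Hello world")  # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']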
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
# NOTE: the original class name was mangled in this dump; the body matches the
# Git processor pattern in transformers, so that name is assumed here.
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_full_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
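# To run only the fast test above (the @slow one downloads a full checkpoint):
#
#     pytest <path-to-this-test-file> -k "test_inference"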
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
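# For ["A", "B", "C"] this prints all 3! = 6 orderings, from
# ['A', 'B', 'C'] up to ['C', 'B', 'A'].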
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
UpperCAmelCase ="\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
UpperCAmelCase ="\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
UpperCAmelCase ="\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
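# Usage sketch for one image with two text queries (checkpoint name from the
# OWL-ViT model card):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                        images=image, return_tensors="pt")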
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
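# Usage sketch (checkpoint name from the ViLT VQA example):
#
#     processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#     inputs = processor(image, "How many cats are there?", return_tensors="pt")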
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "The total number of n-best predictions to generate."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
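# Usage sketch (paths and tokenizer are hypothetical):
#
#     args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#     train_dataset = SquadDataset(args, tokenizer, mode="train")
#     batch = train_dataset[0]  # dict of input_ids / attention_mask / positions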
'''simple docstring'''
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm for finding strongly connected components in a directed
    graph given as an adjacency list.
    """

    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n_vertices, edges):
    g = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
lowercase__ : Optional[int] = 7
lowercase__ : Optional[int] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
lowercase__ : int = [1, 3, 2, 0, 1, 4, 5, 6, 5]
lowercase__ : Tuple = [(u, v) for u, v in zip(source, target)]
lowercase__ : List[Any] = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
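    # One more quick check: a two-node cycle collapses into a single component.
    assert tarjan(create_graph(2, [(0, 1), (1, 0)])) == [[1, 0]]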
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowercase__ : Optional[Any] = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
"""simple docstring"""
from __future__ import annotations
class Matrix:
    def __init__(self, rows) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
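# Usage example:
#
#     >>> m = Matrix([[1, 2], [3, 4]])
#     >>> m.determinant()
#     -2
#     >>> (m ** 0) == m.identity()
#     True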
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowercase__ = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
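# A de-obfuscated, minimal sketch of the key-renaming idea used above: each
# checkpoint key is pushed through a chain of regex substitutions so the
# original BLIP module names line up with the HF module names. The tiny dict
# below is illustrative data, not a real checkpoint.
import re

def rename_key_sketch(key):
    substitutions = [
        (r"visual_encoder", "vision_model.encoder"),
        (r"blocks", "layers"),
        (r"attn", "self_attn"),
        (r"norm1", "layer_norm1"),
        (r"norm2", "layer_norm2"),
    ]
    for pattern, replacement in substitutions:
        key = re.sub(pattern, replacement, key)
    return key

toy_state_dict = {"visual_encoder.blocks.0.attn.qkv.weight": 0}
renamed = {rename_key_sketch(k): v for k, v in toy_state_dict.items()}
assert "vision_model.encoder.layers.0.self_attn.qkv.weight" in renamed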
| 360
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a_ : Any , a_ : Any=None , a_ : int=None , a_ : str=None , a_ : Optional[int]="resnet50" , a_ : str=3 , a_ : str=32 , a_ : Union[str, Any]=3 , a_ : Tuple=True , a_ : List[str]=True , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Dict = out_indices if out_indices is not None else [4]
lowerCAmelCase_ : int = stage_names
lowerCAmelCase_ : Optional[Any] = out_features
lowerCAmelCase_ : Tuple = backbone
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : List[Any] = num_channels
lowerCAmelCase_ : Optional[int] = use_pretrained_backbone
lowerCAmelCase_ : List[Any] = is_training
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : Dict ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCamelCase ( self : Union[str, Any] , a_ : str , a_ : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = TimmBackbone(config=a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : int = model(a_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = config_and_inputs
lowerCAmelCase_ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __lowerCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
a_ : int = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
a_ : Union[str, Any] = False
a_ : str = False
a_ : List[Any] = False
a_ : Dict = False
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = TimmBackboneModelTester(self )
lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def lowerCamelCase ( self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = "resnet18"
lowerCAmelCase_ : List[Any] = "microsoft/resnet-18"
lowerCAmelCase_ : Tuple = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ )
lowerCAmelCase_ : str = AutoBackbone.from_pretrained(a_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase_ : Dict = AutoBackbone.from_pretrained(a_ , use_timm_backbone=a_ , out_indices=[1, 2, 3] )
lowerCAmelCase_ : Any = AutoBackbone.from_pretrained(a_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowerCamelCase ( self : List[Any] ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : str ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : Any ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowerCamelCase ( self : List[str] ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Tuple ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowerCamelCase ( self : Optional[int] ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowerCamelCase ( self : Dict ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowerCamelCase ( self : int ):
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase ( self : Union[str, Any] ):
pass
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(a_ )
lowerCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase_ : int = self.all_model_classes[0]
lowerCAmelCase_ : Optional[int] = model_class(a_ )
model.to(a_ )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(a_ , a_ )
lowerCAmelCase_ : str = model(**a_ )
lowerCAmelCase_ : Any = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase_ : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase_ : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Tuple = model(**a_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase_ : Optional[int] = copy.deepcopy(a_ )
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : Any = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : int = model(**a_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase_ : str = copy.deepcopy(a_ )
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[int] = model_class(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(**a_ )
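# A framework-free sketch of the out_indices behaviour the tests above
# exercise: a backbone produces one feature map per stage, and out_indices
# selects which stages are returned. (-1,) means "last stage only", matching
# the timm default described in the comments. Stage names here are made up.
stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]
feature_maps = {name: f"<tensor for {name}>" for name in stage_names}

def select_features(out_indices):
    return [feature_maps[stage_names[i]] for i in out_indices]

print(select_features((-1,)))      # last stage only, the default
print(select_features([1, 2, 3]))  # intermediate stages, as in the test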
| 161
| 0
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase_ : Optional[int] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
lowerCAmelCase_ : int = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
lowerCAmelCase_ : Union[str, Any] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE (datasets.Metric ):
"""simple docstring"""
def UpperCamelCase__ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
def UpperCamelCase__ ( self : List[Any] ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
def UpperCamelCase__ ( self : str , __a : Optional[int] , __a : List[str] , __a : str=None , __a : List[str]="uniform_average" , __a : Tuple=True ):
_a = mean_squared_error(
__a , __a , sample_weight=__a , multioutput=__a , squared=__a )
return {"mse": mse}
| 63
|
from __future__ import annotations
def A ( _UpperCAmelCase : list[int] ) -> bool:
'''simple docstring'''
return len(set(_UpperCAmelCase ) ) == len(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
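# Quick sanity check of the uniqueness test above: converting to a set drops
# duplicates, so equal lengths imply all elements were distinct. This is O(n)
# on average, but the elements must be hashable.
assert len(set([1, 2, 3])) == len([1, 2, 3])  # all unique -> True
assert len(set([1, 2, 2])) != len([1, 2, 2])  # duplicate  -> False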
| 339
| 0
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__lowerCAmelCase = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__lowerCAmelCase = dict(zip(vocab, range(len(vocab))))
__lowerCAmelCase = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = Path(tmpdirname)
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__lowerCAmelCase = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__lowerCAmelCase = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__lowerCAmelCase = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__lowerCAmelCase = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
__lowerCAmelCase = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__lowerCAmelCase = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
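# A hedged, pure-Python sketch of how a merges file like the one written above
# is consumed: BPE repeatedly fuses the highest-priority adjacent symbol pair.
# This ignores frequencies and end-of-word markers; it is only meant to show
# why a tiny merges list is enough for a test tokenizer.
def bpe_sketch(word, merges):
    ranks = {tuple(m.split()[:2]): i for i, m in enumerate(merges) if m}
    symbols = list(word)
    while len(symbols) > 1:
        pairs = [(ranks.get((a, b), float("inf")), i)
                 for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break  # no applicable merge left
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

print(bpe_sketch("low", ["l o 123", "lo w 1456", "e r</w> 1789", ""]))  # ['low']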
| 288
|
from __future__ import annotations
def snake_case_ ( snake_case , snake_case ) -> list[str]:
if nth_term == "":
return [""]
lowercase__: Tuple = int(snake_case )
lowercase__: int = int(snake_case )
lowercase__: list[str] = []
for temp in range(int(snake_case ) ):
series.append(f'1 / {pow(temp + 1 , int(snake_case ) )}' if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase = int(input('''Enter the last number (nth term) of the P-Series'''))
__lowerCAmelCase = int(input('''Enter the power for P-Series'''))
print('''Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p''')
print(p_series(nth_term, power))
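# A short numeric companion to the series above: for p > 1 the sum of 1/n^p
# converges (to the Riemann zeta value); for p = 2 the limit is pi^2/6. The
# partial sum below is just a sanity check of that fact.
import math

partial = sum(1 / n**2 for n in range(1, 100_000))
assert abs(partial - math.pi**2 / 6) < 1e-4
print(partial, math.pi**2 / 6)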
| 288
| 1
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__magic_name__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__magic_name__ = concatenate_datasets
__magic_name__ = DownloadConfig
__magic_name__ = DownloadManager
__magic_name__ = DownloadMode
__magic_name__ = DownloadConfig
__magic_name__ = DownloadMode
__magic_name__ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
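# The import-time version gating above follows a common pattern; here is a
# minimal reusable sketch of it (the package name and bound are illustrative,
# not part of the module above).
from packaging import version

def require_min_version(installed, minimum, package="pyarrow"):
    if version.parse(installed) < version.parse(minimum):
        raise ImportWarning(f"`{package}>={minimum}` is required, found {installed}.")

require_min_version("8.0.0", "8.0.0")  # ok, no exception
try:
    require_min_version("7.0.0", "8.0.0")
except ImportWarning as e:
    print(e)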
| 100
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_a)
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : str = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
lowerCamelCase__ : ClassVar[Features] = Features({"text": Value("string")})
lowerCamelCase__ : ClassVar[Features] = Features({})
lowerCamelCase__ : str = "text"
@property
def _UpperCAmelCase ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 77
| 0
|
import math
import tensorflow as tf
from packaging import version
def UpperCamelCase ( _a ) -> int:
'''simple docstring'''
lowercase_ :Dict = tf.convert_to_tensor(_a )
lowercase_ :Union[str, Any] = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def UpperCamelCase ( _a ) -> List[str]:
'''simple docstring'''
lowercase_ :Dict = tf.convert_to_tensor(_a )
lowercase_ :List[str] = tf.cast(math.pi , x.dtype )
lowercase_ :Optional[int] = tf.cast(0.044_715 , x.dtype )
lowercase_ :Union[str, Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_a , 3 )) ))
return x * cdf
def UpperCamelCase ( _a ) -> Any:
'''simple docstring'''
lowercase_ :List[str] = tf.convert_to_tensor(_a )
return x * tf.tanh(tf.math.softplus(_a ) )
def UpperCamelCase ( _a ) -> List[Any]:
'''simple docstring'''
lowercase_ :Dict = tf.convert_to_tensor(_a )
lowercase_ :Optional[Any] = tf.cast(0.044_715 , x.dtype )
lowercase_ :str = tf.cast(0.7_978_845_608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def UpperCamelCase ( _a ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ :Union[str, Any] = tf.convert_to_tensor(_a )
lowercase_ :Union[str, Any] = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def UpperCamelCase ( _a ) -> List[str]:
'''simple docstring'''
return tf.clip_by_value(_gelu(_a ) , -1_0 , 1_0 )
def UpperCamelCase ( _a , _a=-1 ) -> Dict:
'''simple docstring'''
lowercase_ , lowercase_ :Any = tf.split(_a , 2 , axis=_a )
return a * tf.math.sigmoid(_a )
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
def UpperCamelCase ( _a ) -> Dict:
'''simple docstring'''
return tf.keras.activations.gelu(_a , approximate=_a )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.keras.activations.gelu
SCREAMING_SNAKE_CASE : Optional[int] = approximate_gelu_wrap
else:
SCREAMING_SNAKE_CASE : int = _gelu
SCREAMING_SNAKE_CASE : Union[str, Any] = _gelu_new
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"gelu": gelu,
"gelu_10": gelu_aa,
"gelu_fast": gelu_fast,
"gelu_new": gelu_new,
"glu": glu,
"mish": mish,
"quick_gelu": quick_gelu,
"relu": tf.keras.activations.relu,
"sigmoid": tf.keras.activations.sigmoid,
"silu": tf.keras.activations.swish,
"swish": tf.keras.activations.swish,
"tanh": tf.keras.activations.tanh,
}
def UpperCamelCase ( _a ) -> int:
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
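# A framework-free sketch comparing the exact erf-based GELU with the tanh
# approximation implemented above; the two agree to about 1e-3 over typical
# inputs, which is why the approximate form is an acceptable drop-in.
import math

def gelu_exact(x):
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_exact(x) - gelu_tanh(x)) < 1e-3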
| 252
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCamelCase :
'''simple docstring'''
lowercase : Dict =MBartConfig
lowercase : Union[str, Any] ={}
lowercase : Optional[int] ="""gelu"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=20 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , ):
lowercase_ :int = parent
lowercase_ :Any = batch_size
lowercase_ :Any = seq_length
lowercase_ :Union[str, Any] = is_training
lowercase_ :Optional[Any] = use_labels
lowercase_ :List[str] = vocab_size
lowercase_ :Union[str, Any] = hidden_size
lowercase_ :Optional[Any] = num_hidden_layers
lowercase_ :Optional[int] = num_attention_heads
lowercase_ :Any = intermediate_size
lowercase_ :str = hidden_dropout_prob
lowercase_ :List[Any] = attention_probs_dropout_prob
lowercase_ :Union[str, Any] = max_position_embeddings
lowercase_ :str = eos_token_id
lowercase_ :List[Any] = pad_token_id
lowercase_ :List[str] = bos_token_id
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase_ :Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase_ :Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase_ :Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ :Optional[Any] = prepare_mbart_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Tuple = TFMBartModel(config=UpperCamelCase_ ).get_decoder()
lowercase_ :Any = inputs_dict['''input_ids''']
lowercase_ :List[Any] = input_ids[:1, :]
lowercase_ :List[Any] = inputs_dict['''attention_mask'''][:1, :]
lowercase_ :str = inputs_dict['''head_mask''']
lowercase_ :List[str] = 1
# first forward pass
lowercase_ :Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
lowercase_ , lowercase_ :int = outputs.to_tuple()
lowercase_ :List[Any] = past_key_values[1]
def UpperCamelCase ( _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> int:
'''simple docstring'''
if attention_mask is None:
lowercase_ :Dict = tf.cast(tf.math.not_equal(_a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase_ :Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase_ :Any = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ :Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ :Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[Any] =(TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase : Optional[Any] =(TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase : Optional[Any] =(
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase : Optional[Any] =True
lowercase : Optional[Any] =False
lowercase : List[str] =False
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = TFMBartModelTester(self )
lowercase_ :str = ConfigTester(self , config_class=UpperCamelCase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
lowercase_ :str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] =[
""" UN Chief Says There Is No Military Solution in Syria""",
]
lowercase : Optional[int] =[
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
lowercase : Any ="""facebook/mbart-large-en-ro"""
@cached_property
def UpperCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase ( self ):
lowercase_ :Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Any = self.translate_src_text(**UpperCamelCase_ )
self.assertListEqual(self.expected_text , UpperCamelCase_ )
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Optional[Any] = self.tokenizer(self.src_text , **UpperCamelCase_ , return_tensors='''tf''' )
lowercase_ :Union[str, Any] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
lowercase_ :Any = self.tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
return generated_words
@slow
def UpperCamelCase ( self ):
self._assert_generated_batch_equal_expected()
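# A numpy-only sketch of what prepare_mbart_inputs_dict does for the default
# attention mask: positions that are not the pad token become 1, pad positions
# become 0. The ids and pad_token_id below are made up.
import numpy as np

def make_attention_mask(input_ids, pad_token_id):
    return (np.asarray(input_ids) != pad_token_id).astype(np.int8)

ids = [[5, 17, 2, 1, 1], [9, 4, 6, 8, 2]]
print(make_attention_mask(ids, pad_token_id=1))
# [[1 1 1 0 0]
#  [1 1 1 1 1]]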
| 252
| 1
|
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str] ):
"""simple docstring"""
lowerCAmelCase_ = OmegaConf.load(__lowerCAmelCase )
lowerCAmelCase_ = torch.load(__lowerCAmelCase , map_location="cpu" )["""model"""]
lowerCAmelCase_ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase_ = {}
lowerCAmelCase_ = """first_stage_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCAmelCase_ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase_ = {}
lowerCAmelCase_ = """model.diffusion_model."""
for key in keys:
if key.startswith(__lowerCAmelCase ):
lowerCAmelCase_ = state_dict[key]
lowerCAmelCase_ = config.model.params.first_stage_config.params
lowerCAmelCase_ = config.model.params.unet_config.params
lowerCAmelCase_ = VQModel(**__lowerCAmelCase ).eval()
vqvae.load_state_dict(__lowerCAmelCase )
lowerCAmelCase_ = UNetLDMModel(**__lowerCAmelCase ).eval()
unet.load_state_dict(__lowerCAmelCase )
lowerCAmelCase_ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , )
lowerCAmelCase_ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipeline.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", type=str, required=True)
parser.add_argument("--config_path", type=str, required=True)
parser.add_argument("--output_path", type=str, required=True)
_A = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
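# A minimal sketch of the prefix-splitting loops above: a combined checkpoint
# is carved into per-module state dicts by key prefix, stripping the prefix so
# each sub-module can load its slice directly. The toy dict is illustrative.
def extract_by_prefix(state_dict, prefix):
    return {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}

combined = {
    "first_stage_model.encoder.weight": 1,
    "model.diffusion_model.in.weight": 2,
}
print(extract_by_prefix(combined, "first_stage_model."))      # {'encoder.weight': 1}
print(extract_by_prefix(combined, "model.diffusion_model."))  # {'in.weight': 2}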
| 231
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower_vision_model"""
def __init__( self , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=3 , __magic_name__=1_6 , __magic_name__=2_8_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__=True , __magic_name__=False , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : str = num_hidden_layers
lowerCamelCase : Optional[int] = num_channels
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Any = initializer_factor
lowerCamelCase : Tuple = layer_norm_eps
lowerCamelCase : Tuple = stop_gradient
lowerCamelCase : Optional[int] = share_layernorm
lowerCamelCase : str = remove_last_layer
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
            lowerCamelCase : str = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Union[str, Any] = """bridgetower_text_model"""
def __init__( self , __magic_name__=5_0_2_6_5 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=1 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_4 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ):
super().__init__(**__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Any = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : Tuple = hidden_act
lowerCamelCase : Optional[int] = initializer_factor
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[int] = position_embedding_type
lowerCamelCase : List[str] = use_cache
lowerCamelCase : List[str] = pad_token_id
lowerCamelCase : List[str] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , **__magic_name__ ):
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(__magic_name__ , **__magic_name__ )
if config_dict.get("""model_type""" ) == "bridgetower":
lowerCamelCase : Optional[int] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__magic_name__ , **__magic_name__ )
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Dict = """bridgetower"""
def __init__( self , __magic_name__=True , __magic_name__="gelu" , __magic_name__=7_6_8 , __magic_name__=1 , __magic_name__=1e-05 , __magic_name__=False , __magic_name__="add" , __magic_name__=1_2 , __magic_name__=6 , __magic_name__=False , __magic_name__=False , __magic_name__=None , __magic_name__=None , **__magic_name__ , ):
# TODO: remove this once the Hub files are updated.
lowerCamelCase : int = kwargs.pop("""text_config_dict""" , __magic_name__ )
lowerCamelCase : str = kwargs.pop("""vision_config_dict""" , __magic_name__ )
super().__init__(**__magic_name__ )
lowerCamelCase : str = share_cross_modal_transformer_layers
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : str = hidden_size
lowerCamelCase : Tuple = initializer_factor
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = share_link_tower_layers
lowerCamelCase : List[Any] = link_tower_type
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Union[str, Any] = tie_word_embeddings
lowerCamelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
lowerCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
lowerCamelCase : Any = BridgeTowerTextConfig(**__magic_name__ )
lowerCamelCase : Optional[Any] = BridgeTowerVisionConfig(**__magic_name__ )
@classmethod
def UpperCamelCase__ ( cls , __magic_name__ , __magic_name__ , **__magic_name__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__magic_name__ )
def UpperCamelCase__ ( self ):
lowerCamelCase : str = copy.deepcopy(self.__dict__ )
lowerCamelCase : int = self.text_config.to_dict()
lowerCamelCase : Dict = self.vision_config.to_dict()
lowerCamelCase : List[str] = self.__class__.model_type
return output
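# A stdlib sketch of the composite-config serialization pattern used in
# `to_dict` above: copy the flat attributes, then replace the nested config
# objects with their own dict form so the result is fully JSON-serializable.
import copy
import json

class SubConfigSketch:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)

class CompositeConfigSketch:
    model_type = "composite"

    def __init__(self):
        self.text_config = SubConfigSketch(768)
        self.vision_config = SubConfigSketch(1024)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.model_type
        return output

print(json.dumps(CompositeConfigSketch().to_dict()))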
| 287
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : str ):
"""simple docstring"""
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =0
for i in range(len(__UpperCamelCase ) ):
if lista[i] != lista[i]:
count += 1
__UpperCamelCase ='''_'''
if count > 1:
return False
else:
return "".join(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : list[str] ):
"""simple docstring"""
__UpperCamelCase =[]
while True:
__UpperCamelCase =['''$'''] * len(__UpperCamelCase )
__UpperCamelCase =[]
for i in range(len(__UpperCamelCase ) ):
for j in range(i + 1 , len(__UpperCamelCase ) ):
__UpperCamelCase =compare_string(binary[i] , binary[j] )
if k is False:
__UpperCamelCase ='''*'''
__UpperCamelCase ='''*'''
temp.append('''X''' )
for i in range(len(__UpperCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__UpperCamelCase ) == 0:
return pi
__UpperCamelCase =list(set(__UpperCamelCase ) )
def lowerCAmelCase (__UpperCamelCase : int , __UpperCamelCase : Sequence[float] ):
"""simple docstring"""
__UpperCamelCase =[]
for minterm in minterms:
__UpperCamelCase =''''''
for _ in range(__UpperCamelCase ):
__UpperCamelCase =str(minterm % 2 ) + string
minterm //= 2
temp.append(__UpperCamelCase )
return temp
def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int ):
"""simple docstring"""
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =list(__UpperCamelCase )
__UpperCamelCase =0
for i in range(len(__UpperCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def lowerCAmelCase (__UpperCamelCase : list[list[int]] , __UpperCamelCase : list[str] ):
"""simple docstring"""
__UpperCamelCase =[]
__UpperCamelCase =[0] * len(__UpperCamelCase )
for i in range(len(chart[0] ) ):
__UpperCamelCase =0
__UpperCamelCase =-1
for j in range(len(__UpperCamelCase ) ):
if chart[j][i] == 1:
count += 1
__UpperCamelCase =j
if count == 1:
__UpperCamelCase =1
for i in range(len(__UpperCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__UpperCamelCase ) ):
__UpperCamelCase =0
temp.append(prime_implicants[i] )
while True:
__UpperCamelCase =0
__UpperCamelCase =-1
__UpperCamelCase =0
for i in range(len(__UpperCamelCase ) ):
__UpperCamelCase =chart[i].count(1 )
if count_n > max_n:
__UpperCamelCase =count_n
__UpperCamelCase =i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__UpperCamelCase ) ):
__UpperCamelCase =0
def lowerCAmelCase (__UpperCamelCase : list[str] , __UpperCamelCase : list[str] ):
"""simple docstring"""
__UpperCamelCase =[[0 for x in range(len(__UpperCamelCase ) )] for x in range(len(__UpperCamelCase ) )]
for i in range(len(__UpperCamelCase ) ):
__UpperCamelCase =prime_implicants[i].count('''_''' )
for j in range(len(__UpperCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , __UpperCamelCase ):
__UpperCamelCase =1
return chart
def lowerCAmelCase ():
"""simple docstring"""
__UpperCamelCase =int(input('''Enter the no. of variables\n''' ) )
__UpperCamelCase =[
float(__UpperCamelCase )
for x in input(
'''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
]
__UpperCamelCase =decimal_to_binary(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =check(__UpperCamelCase )
print('''Prime Implicants are:''' )
print(__UpperCamelCase )
__UpperCamelCase =prime_implicant_chart(__UpperCamelCase , __UpperCamelCase )
__UpperCamelCase =selection(__UpperCamelCase , __UpperCamelCase )
print('''Essential Prime Implicants are:''' )
print(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
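# A small de-obfuscated sketch of the two helper steps above: minterms are
# rendered as fixed-width binary strings, and two implicants combine when they
# differ in exactly one bit position (that bit becomes '_').
def to_binary(n_vars, minterm):
    return format(minterm, f"0{n_vars}b")

def combine(a, b):
    diffs = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diffs) != 1:
        return False
    i = diffs[0]
    return a[:i] + "_" + a[i + 1:]

assert to_binary(3, 5) == "101"
assert combine("101", "111") == "1_1"  # differ only in the middle bit
assert combine("101", "010") is False  # differ in three bits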
| 361
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__lowercase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''albert'''
def __init__( self : List[Any] , UpperCamelCase__ : List[Any]=30000 , UpperCamelCase__ : int=128 , UpperCamelCase__ : str=4096 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Any=16384 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Optional[int]="gelu_new" , UpperCamelCase__ : int=0 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Tuple=1E-12 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict="absolute" , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[Any]=3 , **UpperCamelCase__ : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
__UpperCamelCase =vocab_size
__UpperCamelCase =embedding_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_hidden_groups
__UpperCamelCase =num_attention_heads
__UpperCamelCase =inner_group_num
__UpperCamelCase =hidden_act
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =classifier_dropout_prob
__UpperCamelCase =position_embedding_type
class _lowercase ( __a ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
__UpperCamelCase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCamelCase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
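# A tiny sketch of the dynamic-axes mapping returned above: each input name
# maps axis index -> symbolic name, and multiple-choice inputs get an extra
# 'choice' axis between batch and sequence.
from collections import OrderedDict

def onnx_inputs_sketch(task):
    if task == "multiple-choice":
        axes = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axes = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", axes), ("attention_mask", axes), ("token_type_ids", axes)]
    )

print(onnx_inputs_sketch("sequence-classification"))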
| 85
| 0
|
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
while b:
__lowerCamelCase , __lowerCamelCase : Any = b, a % b
return a
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
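# As a natural follow-up to the two gcd variants above, a sketch of the
# extended Euclidean algorithm, which also returns Bezout coefficients x, y
# with a*x + b*y == gcd(a, b). This is an addition, not part of the script.
def extended_euclidean_gcd(a, b):
    if b == 0:
        return a, 1, 0
    g, x, y = extended_euclidean_gcd(b, a % b)
    return g, y, x - (a // b) * y

g, x, y = extended_euclidean_gcd(6, 3)
assert g == 3 and 6 * x + 3 * y == g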
| 73
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE):
def __init__( self :Union[str, Any] , _A :List[Any]=0.01 , _A :Optional[Any]=1_000 ) -> Tuple:
'''simple docstring'''
__A = p_stop
__A = max_length
def __iter__( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
__A = 0
__A = False
while not stop and count < self.max_length:
yield count
count += 1
__A = random.random() < self.p_stop
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :List[Any] , _A :Tuple , _A :int , _A :Tuple=False , _A :str=True ) -> Optional[int]:
'''simple docstring'''
__A = [
BatchSamplerShard(_A , 2 , _A , split_batches=_A , even_batches=_A )
for i in range(2 )
]
__A = [list(_A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_A ) for shard in batch_sampler_shards] , [len(_A ) for e in expected] )
self.assertListEqual(_A , _A )
def lowercase_ ( self :Any ) -> int:
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_A , _A )
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A )
def lowercase_ ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A )
def lowercase_ ( self :Tuple ) -> List[str]:
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batch.
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_A )
__A = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[[0, 1]], []]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
__A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A , even_batches=_A )
def lowercase_ ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_A )
# Expected shouldn't change
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size.
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_A )
__A = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
# Check the shards when the dataset is very small.
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[[0, 1]], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
__A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_A )
__A = [[], []]
self.check_batch_sampler_shards(_A , _A , split_batches=_A , even_batches=_A )
def lowercase_ ( self :Tuple ) -> Dict:
'''simple docstring'''
__A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__A = [BatchSamplerShard(_A , 2 , _A , even_batches=_A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowercase_ ( self :int , _A :Optional[Any] , _A :List[str] , _A :Dict , _A :Any=False , _A :str=2 , _A :Any=False ) -> Dict:
'''simple docstring'''
random.seed(_A )
__A = list(_A )
__A = [
IterableDatasetShard(
_A , batch_size=_A , drop_last=_A , num_processes=_A , process_index=_A , split_batches=_A , )
for i in range(_A )
]
__A = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_A )
iterable_dataset_lists.append(list(_A ) )
__A = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__A = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_A ) , len(_A ) )
self.assertTrue(len(_A ) % shard_batch_size == 0 )
__A = []
for idx in range(0 , len(_A ) , _A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_A ) < len(_A ):
reference += reference
self.assertListEqual(_A , reference[: len(_A )] )
def lowercase_ ( self :Optional[Any] ) -> List[Any]:
'''simple docstring'''
__A = 42
__A = RandomIterableDataset()
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
# Edge case with a very small dataset
__A = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
self.check_iterable_dataset_shards(_A , _A , batch_size=4 , drop_last=_A , split_batches=_A )
def lowercase_ ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
__A = BatchSampler(range(16 ) , batch_size=4 , drop_last=_A )
__A = SkipBatchSampler(_A , 2 )
self.assertListEqual(list(_A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self :List[str] ) -> Any:
'''simple docstring'''
__A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self :Any ) -> Dict:
'''simple docstring'''
__A = DataLoader(list(range(16 ) ) , batch_size=4 )
__A = skip_first_batches(_A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase_ ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
__A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowercase_ ( self :Dict ) -> Any:
'''simple docstring'''
Accelerator()
__A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
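# [Editor's note] A hedged usage sketch of the resume helper exercised by the
# skip tests above; `skip_first_batches` is accelerate's public API for
# fast-forwarding a dataloader when resuming mid-epoch:
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(list(range(16)), batch_size=4)
#   resumed = skip_first_batches(loader, num_batches=2)
#   [t.tolist() for t in resumed]  # -> [[8, 9, 10, 11], [12, 13, 14, 15]]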
| 161
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 355
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
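# The `sys.modules[__name__]` swap above is what makes the module lazy: importing
# `transformers.models.rag` only builds the `_import_structure` table, and the
# heavy torch/TF submodules are imported on first attribute access (for example
# the first lookup of `RagModel` or `TFRagModel`).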
| 240
| 0
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that returns the wall-clock time a call took instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=1_00, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def write(features, filename, num_examples=1_00, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=filename) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=filename, info=datasets.DatasetInfo(features=features))

    return dataset
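# [Editor's note] Hedged usage sketch, not part of the original benchmark file.
# `datasets.Features`, `datasets.Value` and `datasets.Array2D` are real `datasets`
# APIs; the feature spec and sizes below are illustrative only.
if __name__ == "__main__":
    import tempfile

    my_features = datasets.Features(
        {"text": datasets.Value("string"), "matrix": datasets.Array2D(shape=(4, 4), dtype="float32")}
    )
    with tempfile.TemporaryDirectory() as tmp_dir:
        ds = write(my_features, f"{tmp_dir}/dummy.arrow", num_examples=10)
        print(len(ds))  # -> 10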
| 288
|
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )

    return max_cuboid_size
if __name__ == "__main__":
print(F"{solution() = }")
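# Why the counting works: for a cuboid a x b x c with a >= b >= c, unfolding two
# faces shows the shortest surface path between opposite corners has length
# sqrt(a^2 + (b + c)^2). With a = max_cuboid_size and s = b + c fixed, an
# integer-length solution contributes one cuboid per admissible shortest side c
# with max(1, s - a) <= c <= s // 2, i.e. min(a, s // 2) - max(1, s - a) + 1.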
| 288
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 325
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
'''simple docstring'''
@staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
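# Example invocation once the command is registered on the CLI entry point (it
# simply pulls the model and its tokenizer into the local cache):
#   transformers-cli download bert-base-uncased --cache-dir /tmp/models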
| 325
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __lowercase(SequenceFeatureExtractor):
    """Whisper-style log-mel feature extractor (mangled class name kept from the source)."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_60_00,
        hop_length=1_60,
        chunk_length=30,
        n_fft=4_00,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
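# [Editor's note] Hedged usage sketch, not part of the original module. With the
# defaults above (80 mel bins, 16 kHz, hop 160, 30 s chunks), one padded clip
# maps to a (1, 80, 3000) array:
#   fe = __lowercase()
#   feats = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape  # -> (1, 80, 3000)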
| 252
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252
| 1
|
from __future__ import annotations
def __SCREAMING_SNAKE_CASE(nums: list) -> float:
    """
    Return the arithmetic mean of the numbers in ``nums``.

    >>> __SCREAMING_SNAKE_CASE([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> __SCREAMING_SNAKE_CASE([5, 10, 15, 20, 25, 30, 35])
    20.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class __snake_case ( unittest.TestCase ):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not trusted, loading the dynamic class must fail.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
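# The three `trust_remote_code` cases above capture the whole contract: leaving
# it unset falls back to a locally registered class when one exists (and errors
# otherwise), `False` refuses Hub code outright, and `True` imports the Hub
# implementation even when a local class is registered.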
| 204
| 1
|
from heapq import heappop, heappush
import numpy as np
def UpperCamelCase(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
):
    """Dijkstra-style shortest path on a 0/1 grid; returns (distance, path)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
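# [Editor's demo, not in the original module] 1 = walkable, 0 = wall; the search
# above returns (distance, path), or (inf, []) when the destination is unreachable.
if __name__ == "__main__":
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    distance, path = UpperCamelCase(demo_grid, (0, 0), (2, 0), allow_diagonal=False)
    print(distance, path)  # 6.0 and the path that loops around the blocked row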
| 140
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 85
| 0
|
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Run one generation: judge every point against its 3x3 neighbourhood."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
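# The update rule in __judge_point is Conway's classic B3/S23: a dead cell with
# exactly three live neighbours is born, a live cell with two or three live
# neighbours survives, and every other cell dies of under- or overpopulation.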
| 58
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 1_2_8
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 4_8
    sequence_state_dim: int = 1_0_2_4
    pairwise_state_dim: int = 1_2_8
    sequence_head_width: int = 3_2
    pairwise_head_width: int = 3_2
    position_bins: int = 3_2
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_2_8
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")

        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 3_8_4
    pairwise_dim: int = 1_2_8
    ipa_dim: int = 1_6
    resnet_dim: int = 1_2_8
    num_heads_ipa: int = 1_2
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 1_0
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
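# [Editor's sketch, not in the original file] The folding branch round-trips the
# nested dataclasses through to_dict(), using the defaults declared above:
#   cfg = EsmConfig(vocab_size=33, is_folding_model=True)
#   cfg.to_dict()["esmfold_config"]["trunk"]["structure_module"]["num_blocks"]  # -> 8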
| 58
| 1
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        # the CoNLL score is the average of the MUC, B-cubed and CEAFe F1 values
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
                else:
                    break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
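# A minimal sketch of driving `evaluate` directly with CoNLL-formatted lines read from
# disk (the file paths are placeholders, not part of this script); the datasets.Metric
# wrapper above does the same via compute(predictions=..., references=...):
#
#     metrics = [("mentions", evaluator.mentions), ("muc", evaluator.muc),
#                ("bcub", evaluator.b_cubed), ("ceafe", evaluator.ceafe), ("lea", evaluator.lea)]
#     key_lines = open("key.conll", encoding="utf-8").read().splitlines()
#     sys_lines = open("sys.conll", encoding="utf-8").read().splitlines()
#     scores = evaluate(key_lines, sys_lines, metrics, NP_only=False,
#                       remove_nested=False, keep_singletons=True, min_span=False)
#     print(scores["conll_score"])  # average of the MUC, B-cubed and CEAFe F1 values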
| 122
|
import argparse
JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file to include the new `version`."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
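# Example invocation, assuming a checkout that contains the JS_FILE path above
# (the script filename is a placeholder):
#
#     python update_custom_js.py --version 4.30.0
#
# This rewrites the `const stableVersion = ...` line and appends a
# `"v4.30.0": "v4.30.0",` entry to the `versionMapping` dictionary.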
| 240
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 360
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
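# To run only the slow generation test above in a transformers checkout (the test path
# follows the usual repo layout and is stated here as an assumption):
#
#     RUN_SLOW=1 pytest tests/models/gpt_neox/test_modeling_gpt_neox.py -k lm_generate_gptneox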
| 87
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
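# What the lazy `_import_structure` buys you (illustrative sketch, not part of this
# file): importing the package stays cheap, and the torch-gated classes are only
# resolved on first attribute access.
#
#     from transformers.models.mega import MegaConfig  # no torch import triggered
#     from transformers.models.mega import MegaModel   # resolves the torch-gated module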
| 325
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that will only step when the optimizer(s) have a training step. Useful
    to avoid making a scheduler step too fast when gradients went overflow and there was no training step (in mixed
    precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
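# Minimal usage sketch (hypothetical; Accelerator.prepare() normally builds this
# wrapper for you): wrap a torch scheduler so it steps `num_processes` times per
# synced optimizer step.
#
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100)
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#     ...
#     optimizer.step()
#     scheduler.step()  # skipped (only counted) while gradients are still accumulating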
| 325
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that will remove the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
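# Example sketch (assumes an interactive terminal; illustrative, not part of the
# accelerate CLI itself): prompting for a compute environment with the helpers above.
#
#     compute_environment = _ask_options(
#         "In which compute environment are you running?",
#         ["This machine", "AWS (Amazon SageMaker)"],
#         _convert_compute_environment,
#     )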
| 124
|
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=True, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
| 124
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
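# These tests only do something useful on a TPU host; there they can be run with
# (the file path follows the usual transformers examples layout, an assumption here):
#
#     python -m pytest examples/pytorch/test_xla_examples.py -v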
| 204
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
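# Usage sketch (the class name is reconstructed above; "cat.png" is a placeholder):
# one PIL image in, a batch of (1, 3, 224, 224) pixel values out.
#
#     from PIL import Image
#     processor = CLIPImageProcessor()  # defaults defined in __init__ above
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])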
| 204
| 1
|
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
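# An equivalent iterative DFS with an explicit stack -- a sketch, not part of the class
# above; it assumes the same adjacency dict with vertices numbered 0..n-1.
#
#     def dfs_iterative(graph: dict) -> None:
#         visited = [False] * len(graph)
#         for start in range(len(graph)):
#             stack = [start]
#             while stack:
#                 v = stack.pop()
#                 if visited[v]:
#                     continue
#                 visited[v] = True
#                 print(v, end=" ")
#                 stack.extend(graph.get(v, []))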
| 357
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
    'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33
| 0
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
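    # Example invocation (hypothetical paths; the script filename is illustrative):
    #   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted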
| 58
|
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    # Capitalize the first character of a sentence, leaving the rest unchanged.
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 1
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU')
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])

        devices = tf.config.list_logical_devices(device_type='CPU')
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
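# A minimal sketch (our illustration, not the transformers implementation) of the
# idea the tests above exercise: accumulate per-step gradients into running sums
# and hand the totals to the optimizer only once per "virtual" batch.
class ToyAccumulator:
    def __init__(self):
        self.step = 0
        self.sums = None

    def __call__(self, grads):
        if self.sums is None:
            self.sums = [tf.Variable(tf.zeros_like(g)) for g in grads]
        for s, g in zip(self.sums, grads):
            s.assign_add(g)  # running sum, applied later by the optimizer
        self.step += 1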
| 194
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
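# A minimal sketch (our simplification, not the transformers _LazyModule API) of
# the lazy-import pattern above: the module in sys.modules is replaced by an
# object whose __getattr__ imports the real submodule only on first access.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)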
| 194
| 1
|
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    # Convert a roman numeral to an integer, handling subtractive pairs like IV.
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    # Greedily consume the largest arabic value from the ROMAN table above.
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
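    # Quick round-trip check (our own illustrative example):
    assert roman_to_int("MCMXCIV") == 1994
    assert int_to_roman(1994) == "MCMXCIV"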
| 193
|
def perfect_cube(n: int) -> bool:
    # round before comparing: a raw n ** (1 / 3) suffers floating-point error
    # (e.g. 27 ** (1 / 3) == 3.0000000000000004), which made the old check fail
    val = round(n ** (1 / 3))
    return (val * val * val) == n
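# For very large n even the rounded float can misround; a binary-search integer
# cube root (our added sketch, assuming n >= 0) stays exact:
def perfect_cube_binary_search(n: int) -> bool:
    left, right = 0, max(n, 1)
    while left <= right:
        mid = (left + right) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            left = mid + 1
        else:
            right = mid - 1
    return False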
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 87
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
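    # Example invocation (hypothetical paths; the script filename is illustrative):
    #   python convert_switch_transformers_checkpoint.py --switch_t5x_checkpoint_path /tmp/t5x_ckpt \
    #       --gin_file /tmp/model.gin --pytorch_dump_folder_path /tmp/switch_pt --num_experts 8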
| 364
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x_coordinates = [i[0] for i in self.list_of_points]
        y_coordinates = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree))
        plt.scatter(x_coordinates, y_coordinates, color='red', label='Control Points')
        plt.legend()
        plt.show()
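# An alternative evaluation method (added for illustration): De Casteljau's
# algorithm computes the same curve point by repeated linear interpolation,
# which is numerically more stable than summing Bernstein polynomials for
# high-degree curves.
def de_casteljau(points: list[tuple[float, float]], t: float) -> tuple[float, float]:
    pts = list(points)
    while len(pts) > 1:
        # interpolate each consecutive pair of points at parameter t
        pts = [
            ((1 - t) * xa + t * xb, (1 - t) * ya + t * yb)
            for (xa, ya), (xb, yb) in zip(pts, pts[1:])
        ]
    return pts[0]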
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 282
| 0
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float, ) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest(
    principal: float, nominal_annual_percentage_rate: float, number_of_years: float, ) -> float:
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
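    # Quick sanity check (our own example values): a 50% daily rate on a
    # principal of 500 over 10 days yields 500 * 0.5 * 10 = 2500 of interest.
    assert simple_interest(500.0, 0.5, 10) == 2500.0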
| 124
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
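    # Example invocation (hypothetical paths; the script filename is illustrative):
    #   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
    #       --config_path ./config.json --checkpoint_path ./s3prl_downstream.ckpt --model_dump_path ./converted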
| 124
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __A ( unittest.TestCase ):
    def test_set_level(self):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)
    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)
    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level, current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}", )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()
    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)

        # no need to restore as nothing was changed
    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 323
|
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(10, 20) = }')
print(F'Square: {area_square(10) = }')
print(F'Triangle: {area_triangle(10, 10) = }')
print(F'Triangle: {area_triangle_three_sides(5, 12, 13) = }')
print(F'Parallelogram: {area_parallelogram(10, 20) = }')
print(F'Rhombus: {area_rhombus(10, 20) = }')
print(F'Trapezium: {area_trapezium(10, 20, 30) = }')
print(F'Circle: {area_circle(20) = }')
print(F'Ellipse: {area_ellipse(10, 20) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(20) = }')
print(F'Cuboid: {surface_area_cuboid(10, 20, 30) = }')
print(F'Sphere: {surface_area_sphere(20) = }')
print(F'Hemisphere: {surface_area_hemisphere(20) = }')
print(F'Cone: {surface_area_cone(10, 20) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }')
print(F'Cylinder: {surface_area_cylinder(10, 20) = }')
print(F'Torus: {surface_area_torus(20, 10) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 10) = }')
print(F'Square: {area_reg_polygon(4, 10) = }')
    print(F'Regular Pentagon: {area_reg_polygon(5, 10) = }')
| 323
| 1
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 104
|
"""simple docstring"""
def solution(n: int = 100) -> int:
    # Difference between the square of the sum and the sum of the squares of
    # the first n natural numbers (Project Euler problem 6).
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
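# The loop above is O(n); the same value follows in O(1) from the closed forms
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6 (our added sketch):
def solution_closed_form(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares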
if __name__ == "__main__":
print(F"""{solution() = }""")
| 33
| 0
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_1, array_2)}""")
| 369
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True, )
                for d in range(num_layers)
            ])

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0)
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name, module, processors):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds, dim=1, )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| 343
| 0
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples', 'by_feature'))
        examples_path = os.path.abspath('examples')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section='main()' if parser_only else 'training_function()', ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = '\n'.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '')
                        self.assertEqual(diff, '')

    def test_nlp_examples(self):
        self.one_complete_example('complete_nlp_example.py', True)
        self.one_complete_example('complete_nlp_example.py', False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('examples', 'cv_example.py'))
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py', True, cv_path, special_strings)
        self.one_complete_example('complete_cv_example.py', False, cv_path, special_strings)
@mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '1'})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        '''.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0')))

    def test_checkpointing_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        '''.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2')))

    def test_load_states_by_epoch(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)

    def test_load_states_by_steps(self):
        testargs = f'''
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        '''.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)

    @slow
    def test_cross_validation(self):
        testargs = '''
        examples/by_feature/cross_validation.py
        --num_folds 2
        '''.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)

    def test_multi_process_metrics(self):
        testargs = ['examples/by_feature/multi_process_metrics.py']
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'''
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            '''.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))

    def test_gradient_accumulation(self):
        testargs = ['examples/by_feature/gradient_accumulation.py']
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ['examples/by_feature/local_sgd.py']
        run_command(self._launch_args + testargs)
| 194
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError('You must include at least one label and at least one sequence.')
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template))

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.')

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('entail'):
                return ind
        return -1

    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`')
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get('multi_class', None) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.')
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'])
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(f'Unable to understand extra arguments {args}')

        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            'candidate_label': candidate_label,
            'sequence': sequence,
            'is_last': inputs['is_last'],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs['candidate_label'] for outputs in model_outputs]
        sequences = [outputs['sequence'] for outputs in model_outputs]
        logits = np.concatenate([output['logits'].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 194
| 1
|
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
__lowerCamelCase = [[0] * (cols + 1) for _ in range(rows + 1 )]
__lowerCamelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCamelCase = dp_array[row][col + 1]
__lowerCamelCase = dp_array[row + 1][col + 1]
__lowerCamelCase = dp_array[row + 1][col]
if mat[row][col] == 1:
__lowerCamelCase = 1 + min(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = max(dp_array[row][col] , UpperCamelCase__ )
else:
__lowerCamelCase = 0
return largest_square_area
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
__lowerCamelCase = [0] * (cols + 1)
__lowerCamelCase = [0] * (cols + 1)
__lowerCamelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__lowerCamelCase = current_row[col + 1]
__lowerCamelCase = next_row[col + 1]
__lowerCamelCase = next_row[col]
if mat[row][col] == 1:
__lowerCamelCase = 1 + min(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = max(current_row[col] , UpperCamelCase__ )
else:
__lowerCamelCase = 0
__lowerCamelCase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
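
# Quick consistency check on a hypothetical matrix (not from the original file):
# all four variants above should agree on the maximal square side.
def _check_largest_square_variants() -> None:
    mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    results = {
        largest_square_area_in_matrix_top_down_approach(3, 3, mat),
        largest_square_area_in_matrix_top_down_approach_with_dp(3, 3, mat),
        largest_square_area_in_matrix_bottom_up(3, 3, mat),
        largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, mat),
    }
    assert results == {2}, results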
| 237
|
'''simple docstring'''
def upper(word: str) -> str:
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 237
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
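
# Usage sketch (hypothetical inputs; downloads the checkpoint when run): the
# `_pad` override above extends `global_attention_mask` with -1 so that padded
# positions keep "local attention" semantics instead of looking like real tokens.
def _global_attention_padding_sketch():
    tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    enc = tok("a long document")
    enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # make <s> global
    padded = tok.pad(enc, padding="max_length", max_length=32)
    assert padded["global_attention_mask"][-1] == -1  # padding slots are -1, not 0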
| 7
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
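
# Sketch of exporting with the config above via the legacy `transformers.onnx`
# API; the model id and output path are placeholders.
def _export_sketch():
    from pathlib import Path

    from transformers import LongformerModel, LongformerTokenizer
    from transformers.onnx import export

    model_id = "allenai/longformer-base-4096"
    tokenizer = LongformerTokenizer.from_pretrained(model_id)
    model = LongformerModel.from_pretrained(model_id)
    onnx_config = LongformerOnnxConfig(model.config)
    export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("longformer.onnx"))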
| 282
| 0
|
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
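
# Expected completions for the demo above: each suggestion carries the trailing
# space marker that `_elements` emits for the END sentinel.
def _trie_demo() -> None:
    assert set(autocomplete_using_trie("de")) == {"depart ", "detergent ", "deer ", "deal "}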
| 356
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # smooth map onto the positive orthant
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
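
# Minimal end-to-end sketch (hypothetical sizes): project features to Student's t
# parameters, build the (optionally affine-transformed) distribution, and score
# observations. Illustration only, not part of the original module.
def _student_t_head_sketch() -> torch.Tensor:
    batch, in_features = 4, 32
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features)
    distr_args = projection(torch.randn(batch, in_features))
    distr = output.distribution(distr_args, loc=torch.zeros(batch), scale=torch.ones(batch))
    return distr.log_prob(torch.zeros(batch))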
| 140
| 0
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTester(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
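
# Hedged usage sketch of the verbosity API exercised by the tests above.
def _verbosity_sketch() -> None:
    logging.set_verbosity_info()
    sketch_logger = logging.get_logger("transformers")
    sketch_logger.info("visible at INFO level")
    logging.set_verbosity_warning()  # back to the default level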
| 323
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
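
# Toy subclass showing the `read` contract (illustrative only): real readers such
# as the csv/json/parquet ones build a DatasetBuilder from `self.path_or_paths`.
class _InMemoryReaderSketch(AbstractDatasetReader):
    def read(self):
        return Dataset.from_dict({"text": ["a", "b"]})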
| 323
| 1
|
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
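
# Behavior sketch on hypothetical numbers: optional +91 prefix (with - or space),
# optional leading 0 or 91, then ten digits starting with 7, 8 or 9.
def _validator_examples() -> None:
    assert indian_phone_validator("+918827897895")
    assert indian_phone_validator("9821765432")
    assert not indian_phone_validator("1234567890")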
| 369
|
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
__lowerCAmelCase = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
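# Example invocation (hypothetical script name and local paths):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub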
| 107
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_50, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_00, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 65
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _a ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : str=False ) -> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ = ""
else:
lowerCAmelCase__ = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" )
lowerCAmelCase__ = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def _a ( UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase_ , UpperCamelCase_ )
def _a ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ = dct.pop(UpperCamelCase_ )
lowerCAmelCase__ = val
def _a ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ = ViTMSNConfig()
lowerCAmelCase__ = 1_000
lowerCAmelCase__ = "datasets/huggingface/label-files"
lowerCAmelCase__ = "imagenet-1k-id2label.json"
lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ ) , "r" ) )
lowerCAmelCase__ = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase__ = 384
lowerCAmelCase__ = 1_536
lowerCAmelCase__ = 6
elif "l16" in checkpoint_url:
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase__ = 4
elif "l7" in checkpoint_url:
lowerCAmelCase__ = 7
lowerCAmelCase__ = 1_024
lowerCAmelCase__ = 4_096
lowerCAmelCase__ = 24
lowerCAmelCase__ = 16
lowerCAmelCase__ = 0.1
lowerCAmelCase__ = ViTMSNModel(UpperCamelCase_ )
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="cpu" )["target_encoder"]
lowerCAmelCase__ = ViTImageProcessor(size=config.image_size )
remove_projection_head(UpperCamelCase_ )
lowerCAmelCase__ = create_rename_keys(UpperCamelCase_ , base_model=UpperCamelCase_ )
for src, dest in rename_keys:
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
read_in_q_k_v(UpperCamelCase_ , UpperCamelCase_ , base_model=UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
lowerCAmelCase__ = ViTImageProcessor(
size=config.image_size , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ )
lowerCAmelCase__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase__ = model(**UpperCamelCase_ )
lowerCAmelCase__ = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase__ = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase__ = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCamelCase_ , atol=1e-4 )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 340
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    # attribute mapping follows the upstream s3prl conversion script
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase_ = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 360
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
a_ = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a_ = [3, 3, 3, 3]
a_ = [5, 5, 5, 5]
elif "fl4" in model_name:
a_ = [4, 4, 4, 4]
a_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a_ = [3, 3, 3, 3]
if "lrf" in model_name:
a_ = [3, 3, 3, 3]
else:
a_ = [2, 2, 2, 2]
if "tiny" in model_name:
a_ = 96
elif "small" in model_name:
a_ = 96
elif "base" in model_name:
a_ = 128
elif "large" in model_name:
a_ = 192
elif "xlarge" in model_name:
a_ = 256
elif "huge" in model_name:
a_ = 352
# set label information
a_ = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a_ = "imagenet-22k-id2label.json"
else:
a_ = "imagenet-1k-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = {v: k for k, v in idalabel.items()}
a_ = FocalNetConfig(
embed_dim=UpperCAmelCase , depths=UpperCAmelCase , focal_levels=UpperCAmelCase , focal_windows=UpperCAmelCase , use_conv_embed=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , use_post_layernorm=UpperCAmelCase , use_layerscale=UpperCAmelCase , )
return config
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
if "patch_embed.proj" in name:
a_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a_ = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a_ = "encoder." + name
if "encoder.layers" in name:
a_ = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a_ = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a_ = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a_ = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a_ = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a_ = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a_ = "layernorm.weight"
if name == "norm.bias":
a_ = "layernorm.bias"
if "head" in name:
a_ = name.replace("head" , "classifier" )
else:
a_ = "focalnet." + name
return name
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) ->Dict:
"""simple docstring"""
a_ = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a_ = model_name_to_url[model_name]
print("Checkpoint URL: " , UpperCAmelCase )
a_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
a_ = get_focalnet_config(UpperCAmelCase )
a_ = FocalNetForImageClassification(UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(UpperCAmelCase )
# verify conversion
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = BitImageProcessor(
do_resize=UpperCAmelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase , crop_size=224 , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , )
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
a_ = processor(images=UpperCAmelCase , return_tensors="pt" )
a_ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a_ = image_transforms(UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCAmelCase , atol=1E-4 )
a_ = model(**UpperCAmelCase )
a_ = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a_ = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a_ = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a_ = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a_ = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a_ = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a_ = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(F'''{model_name}''' )
processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCamelCase_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 303
| 0
|
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The flattened original lost the attachment points, so this example tree is a
    # reconstruction with the same nine node values.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
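# With the sample tree built above, this prints False (nodes 5, 7 and 8 each have
# a single child, so the tree is not full), then the depth 5, and finally the
# in-order traversal of the node values.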
| 31
|
def bfs(graph, source, sink, parent):
    # Return True if the sink is still reachable; `parent` records the path found.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and stores the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
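# For this classic textbook network (CLRS, Figure 26.1) the printed maximum flow is 23.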
| 236
| 0
|
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    """simple docstring"""
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    """simple docstring"""
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''')
parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None)
    args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
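# Each line of --correct_filename is expected to hold four ";"-separated fields:
#   file;class_name;test_name;corrected line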
| 350
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """simple docstring"""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """simple docstring"""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
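# Illustrative round-trip, mirroring the standard library's behaviour:
# >>> base64_encode(b"Hello World!")
# b'SGVsbG8gV29ybGQh'
# >>> base64_decode("SGVsbG8gV29ybGQh")
# b'Hello World!'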
| 278
| 0
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
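# This script is meant to be launched under torch.distributed, for example:
#   torchrun --nproc_per_node=2 <this_script.py> --streaming True
# (the exact launcher invocation used by the test suite may differ).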
| 19
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
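# For example, downscale_height_and_width(512, 512, 8) returns (64, 64): the
# latent grid size for a 512px image with a movq scale factor of 8.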
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """simple docstring"""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, negative_image_embeds, hint, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, num_images_per_prompt: int = 1, generator=None, latents=None, output_type: Optional[str] = "pil", return_dict: bool = True, ):
        """simple docstring"""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), image_embeds.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 140
| 0
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, """IN_MEMORY_MAX_SIZE""", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
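# In other words, is_small_dataset(dataset_size) is True only when dataset_size is
# truthy and strictly below config.IN_MEMORY_MAX_SIZE; the default max size of 0
# disables keeping datasets in memory entirely.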
| 3
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""")[-1] == name.split(""".""")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, """w""", encoding="""utf-8""") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3
| 1
|
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """simple docstring"""
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
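# Illustrative call: find_max([1, 3, 5, 9, 2], 0, 4) returns 9.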
| 82
|
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    '''simple docstring'''
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
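# Bucket sort runs in O(n + k) time on average for n items spread over k buckets;
# the worst case degenerates to the cost of sorting a single crowded bucket.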
| 107
| 0
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
"""simple docstring"""
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        """simple docstring"""
        self.run_seqaseq_quick()

    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        """simple docstring"""
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False)
    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        """simple docstring"""
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        """simple docstring"""
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seqaseq(self):
        """simple docstring"""
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        """simple docstring"""
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"
            output_dir = self.run_trainer(
                max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB", )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB", )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}")
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3e-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ):
        """simple docstring"""
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()
        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
        args_predict = """
            --do_predict
        """.split()
        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()
        return output_dir
| 37
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(R"layers_(\d+)", R"block/\1/layer", new_key)
        encoder_decoder_regex = R"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/", R"/1/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/", R"/2/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(F"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # the per-expert target path follows the HF SwitchTransformers naming
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(F"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(R"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    # Initialise PyTorch model
    print(F"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(F"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 37
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        '''simple docstring'''
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, eta: float = 0.0, num_inference_steps: int = 50, use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
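# Illustrative usage (the checkpoint id is just an example):
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]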
| 47
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
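# For example, rotate(numpy.array([1, 0]), 90) is approximately [0, 1].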
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect('''equal''')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 303
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262])
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1E-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1E-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("""prompt""")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="""max_length""", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors="""pt""", )
        text_inputs = text_inputs["""input_ids"""].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["""prompt_embeds"""] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1E-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1E-4
    def test_stable_diffusion_ldmad_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217])
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldmad_stable_diffusion(self):
        device = "cuda"  # the class is gated by @require_torch_gpu
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()  # note: sliced from rgb; the recorded expected values match this slice
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706])
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706])
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldmad(self):
        device = "cuda"  # the class is gated by @require_torch_gpu
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldmad_4c(self):
        device = "cuda"
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
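# Minimal standalone usage sketch (not part of the test suite above): assumes a
# recent `diffusers` install, network access to the Intel/ldm3d checkpoint, and
# that StableDiffusionLDM3DPipeline (the upstream name of this pipeline) is
# importable.
if __name__ == "__main__":
    import torch
    from diffusers import StableDiffusionLDM3DPipeline

    pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    output = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50)
    rgb, depth = output.rgb, output.depth  # paired RGB image and estimated depth map
    rgb[0].save("astronaut_rgb.jpg")
    depth[0].save("astronaut_depth.png")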
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next generation of a Game of Life grid."""
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render ``frames`` successive generations as greyscale PIL images."""
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
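    # Quick sanity check (uses the definitions above): one step turns the
    # vertical blinker into a horizontal bar, and a second step flips it back.
    assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
    assert new_generation(new_generation(BLINKER)) == BLINKER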
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary representation, kept as an int."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
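    # Spot checks (0xAC == 0b10101100; the sign is carried through):
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-fc") == -11111100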
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
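if __name__ == "__main__":
    # Spot checks for the function above: a sign flip mid-array, a zero reset,
    # and a fully negative prefix that pays off later.
    assert max_product_subarray([2, 3, -2, 4]) == 6    # best subarray is [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0      # zero splits the array
    assert max_product_subarray([-2, -3, 4]) == 24     # (-2) * (-3) * 4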
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """Check ``is_small_dataset`` against various size caps, patched via monkeypatch."""
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
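if __name__ == "__main__":
    # Direct (non-pytest) illustration of the same check; the test above patches
    # the attribute non-destructively via monkeypatch instead.
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20      # 500 MiB cap
    print(is_small_dataset(400 * 2**20))  # True: under the cap
    print(is_small_dataset(600 * 2**20))  # False: over the cap
    print(is_small_dataset(None))         # False: unknown size never qualifies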
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
lowercase : str = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
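# Usage sketch (standard `transformers` API): the keyword arguments above set
# the model's shape directly, e.g.
#     from transformers import BertConfig, BertModel
#     config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     model = BertModel(config)   # a small, randomly initialized BERT
#     config.vocab_size           # -> 30522, the default defined above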
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
def _A ( self : Optional[int] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _A ( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int=False ):
if not batched:
UpperCamelCase :List[Any] = self.size["""shortest_edge"""]
UpperCamelCase :Union[str, Any] = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
UpperCamelCase , UpperCamelCase :Tuple = image.size
else:
UpperCamelCase , UpperCamelCase :str = image.shape[1], image.shape[2]
UpperCamelCase :Optional[int] = size / min(__lowerCamelCase , __lowerCamelCase )
if h < w:
UpperCamelCase , UpperCamelCase :List[str] = size, scale * w
else:
UpperCamelCase , UpperCamelCase :Union[str, Any] = scale * h, size
UpperCamelCase :str = int((1_333 / 800) * size )
if max(__lowerCamelCase , __lowerCamelCase ) > max_size:
UpperCamelCase :int = max_size / max(__lowerCamelCase , __lowerCamelCase )
UpperCamelCase :Any = newh * scale
UpperCamelCase :int = neww * scale
UpperCamelCase , UpperCamelCase :int = int(newh + 0.5 ), int(neww + 0.5 )
UpperCamelCase , UpperCamelCase :str = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
UpperCamelCase :Union[str, Any] = []
for image in image_inputs:
UpperCamelCase , UpperCamelCase :Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase :Optional[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
UpperCamelCase :List[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self : List[str] ):
UpperCamelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCamelCase , """size_divisor""" ) )
def _A ( self : Optional[Any] ):
pass
def _A ( self : List[str] ):
UpperCamelCase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :str = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase :int = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self : Optional[Any] ):
UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase :Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :str = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase :Optional[Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self : List[Any] ):
UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase :Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase :Optional[Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values
UpperCamelCase , UpperCamelCase :List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
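if __name__ == "__main__":
    # Hand trace of the expected-size arithmetic in get_expected_values for a
    # 480x640 input with shortest_edge=288 and size_divisor=32 (plain Python).
    size, size_divisor = 288, 32
    w, h = 640, 480
    scale = size / min(w, h)                                        # 0.6
    newh, neww = (size, scale * w) if h < w else (scale * h, size)  # (288, 384.0)
    max_size = int((1_333 / 800) * size)                            # 479; 384 stays below it, so no rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    print(newh // size_divisor * size_divisor, neww // size_divisor * size_divisor)  # 288 384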
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from ``position`` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """A tour is complete when every square has been visited (no zeros left)."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first search: try every knight move from ``pos``, backtracking on failure."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, or raise ValueError."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
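    # Small driver: a 5x5 board admits an open tour; a 2x2 board does not.
    for row in open_knight_tour(5):
        print(row)  # each square holds its 1-based visit order
    try:
        open_knight_tour(2)
    except ValueError as err:
        print(err)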
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects and the
    `TYPE_CHECKING` objects. Returns the two parsed dictionaries, or None for a traditional init.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    """Check that every submodule is properly registered in the main init."""
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
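    # Illustration of the backend parsing used above: find_backend turns a
    # dependency-guard line into a normalized backend key.
    print(find_backend("try:"))                              # None: not a backend guard
    print(find_backend("    if not is_torch_available():"))  # "torch"
    print(find_backend("    if not is_tf_available():"))     # "tf"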
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Map HF Diffusers UNet keys back to original Stable Diffusion keys."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Map HF Diffusers VAE keys back to original Stable Diffusion keys."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert a v2.x (OpenCLIP) text encoder state dict, fusing q/k/v projections."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    # v1 text encoders need no key remapping
    return text_enc_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
_lowerCAmelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
_lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
_lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
_lowerCAmelCase = load_file(unet_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
_lowerCAmelCase = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
_lowerCAmelCase = load_file(vae_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
_lowerCAmelCase = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
_lowerCAmelCase = load_file(text_enc_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
_lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
_lowerCAmelCase = convert_unet_state_dict(unet_state_dict)
_lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_lowerCAmelCase = convert_vae_state_dict(vae_state_dict)
_lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
_lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
_lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
_lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
_lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict)
_lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_lowerCAmelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_lowerCAmelCase = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
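# Smoke-test sketch for the key remapping (assumes the tables above are built):
#     dummy = {"conv_in.weight": torch.zeros(320, 4, 3, 3)}
#     convert_unet_state_dict(dummy)   # -> {"input_blocks.0.0.weight": ...}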
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
__UpperCamelCase : Optional[Any] = {f"funnel-transformer/{name}": 512 for name in _model_names}
__UpperCamelCase : Any = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast Funnel Transformer tokenizer, backed by HuggingFace *tokenizers*."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
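    # Note: unlike BERT, Funnel gives the leading [CLS] token its own segment id
    # (cls_token_type_id == 2), so a pair (A, B) is encoded as
    # [2] + [0] * len(A + sep) + [1] * len(B + sep).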
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 51
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
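# Tracing inputs for IPEX: latents of shape (2, 4, 64, 64), a scalar timestep and
# CLIP text embeddings of shape (2, 77, 768) match the default Stable Diffusion v1
# UNet; adjust them if your fine-tuned model uses different latent or text shapes.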
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
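# Minimal usage sketch (the script filename is hypothetical):
#   python sd_inference_ipex.py --dpm --steps 20
# --dpm swaps in the DPMSolverMultistepScheduler; --steps overrides the pipeline's
# default number of denoising steps.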
| 51
| 1
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    # reconstructed name: disables gradients for every parameter of `module`
    for param in module.parameters():
        param.requires_grad = False
def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device
def show_image(image):
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()
def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
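# Example usage (function names above are reconstructions of the obfuscated originals):
#   device = get_device()
#   freeze_params(model.text_encoder)  # `model` is a hypothetical module with a text encoder
#   print(f"[{get_timestamp()}] running on {device}")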
| 62
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)"""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
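# Layout note: every story is paired with both candidate endings, giving arrays of
# shape (n_batch, 2, input_len); mc_token_ids stores the position of the final
# [classify] token whose hidden state feeds the multiple-choice head, and lm_labels
# uses -100 at padded positions so they are ignored by the LM loss.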
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps", default=-1, type=int, help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ), )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('Encoding dataset...' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
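# Minimal usage sketch (dataset paths are placeholders for the ROCStories CSV files):
#   python run_openai_gpt.py --do_train --do_eval \
#       --train_dataset path/to/cloze_test_val__spring2016.csv \
#       --eval_dataset path/to/cloze_test_test__spring2016.csv \
#       --output_dir out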
| 62
| 1
|
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
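# Example: quick_select returns the k-th smallest element without fully sorting,
# e.g. quick_select([2, 4, 5, 7, 899, 54, 32], 5) == 54; the median of an
# odd-length list is quick_select(items, len(items) // 2).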
| 232
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
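# Shapes: the claim vector lists the total instances of each of the 4 resource
# types; each row of the two 5x4 tables describes one process (currently allocated
# resources vs. its maximum claim).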
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource type"""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Available resources: claim vector - currently allocated resources"""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process need: maximum claim - currently allocated resources"""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process's need list back to its original process index"""
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm on the tables passed to __init__"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break
    def __pretty_data(self):
        """Properly align the display of the algorithm's data"""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
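# Example run (variable names as reconstructed above):
#   BankersAlgorithm(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(describe=True)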
| 232
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
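# The TYPE_CHECKING branch above gives static type checkers real imports, while at
# runtime the module is swapped for a _LazyModule that only imports a submodule
# (and its heavy torch/TF dependencies) on first attribute access.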
| 345
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
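# output_states collects every intermediate activation of the down block; the UNet
# keeps these as the skip connections that the matching up blocks consume later.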
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
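# Note on the up blocks above: res_hidden_states_tuple holds the skip activations
# saved by the matching down block; each iteration pops the most recent one and
# concatenates it along the channel axis before the resnet, mirroring the classic
# UNet skip path.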
| 345
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
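# The generation test decodes greedily (do_sample=False), so the continuation of
# "Legal the president is" is deterministic and can be checked against the
# hard-coded token ids above.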
| 351
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_reformer'] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 331
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width after DETR's shortest-edge resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the magnitude response of the filter, in dB, on a log frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the (unwrapped) phase response of the filter on a log frequency axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
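if __name__ == "__main__":
    # Minimal demo (added sketch, not part of the original module). Any object with a
    # `process(sample) -> float` method satisfies FilterType; an identity filter has a
    # flat 0 dB magnitude response and zero phase shift, so both plots are trivial.
    class PassThroughFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(PassThroughFilter(), 48000)
    show_phase_response(PassThroughFilter(), 48000)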
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
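# Minimal usage sketch (the model name and exact output text are illustrative,
# not guaranteed):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("https://example.com/cat.png")
#   # -> [{"generated_text": "..."}]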
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit] = None, features: Optional[Features] = None, streaming: bool = True, cache_dir: str = None, keep_in_memory: bool = False, working_dir: str = None, load_from_cache_file: bool = True, file_format: str = "arrow", **kwargs):
        super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
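# Minimal usage sketch (assumes an active SparkSession `spark`; values are illustrative):
#
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()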
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
def __init__( self : Dict , _snake_case : Union[str, Any] , _snake_case : str=13 , _snake_case : int=64 , _snake_case : List[Any]=3 , _snake_case : Optional[int]=3 , _snake_case : List[Any]=2 , _snake_case : Dict=1 , _snake_case : Union[str, Any]=16 , _snake_case : Any=[128, 256, 384] , _snake_case : Any=[4, 6, 8] , _snake_case : Optional[int]=[2, 3, 4] , _snake_case : List[Any]=[16, 16, 16] , _snake_case : Union[str, Any]=0 , _snake_case : Optional[int]=[2, 2, 2] , _snake_case : Any=[2, 2, 2] , _snake_case : List[Any]=0.0_2 , _snake_case : List[str]=True , _snake_case : List[Any]=True , _snake_case : Any=2 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = kernel_size
UpperCAmelCase_ = stride
UpperCAmelCase_ = padding
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = depths
UpperCAmelCase_ = key_dim
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = attention_ratio
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = initializer_range
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def lowerCamelCase ( self : str , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = LevitModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case)
UpperCAmelCase_ = (self.image_size, self.image_size)
UpperCAmelCase_ , UpperCAmelCase_ = image_size[0], image_size[1]
for _ in range(4):
UpperCAmelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
UpperCAmelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = LevitForImageClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
'''feature-extraction''': LevitModel,
'''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Any = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = LevitConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='''Levit does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='''Levit does not output attentions''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
def check_hidden_states_output(_snake_case : Optional[Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any]):
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = len(self.model_tester.depths) + 1
self.assertEqual(len(_snake_case) , _snake_case)
UpperCAmelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCAmelCase_ , UpperCAmelCase_ = image_size[0], image_size[1]
for _ in range(4):
UpperCAmelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
UpperCAmelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Any , _snake_case : Optional[int] , _snake_case : str , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_snake_case)
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
UpperCAmelCase_ = model(**_snake_case).loss
loss.backward()
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_snake_case) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCAmelCase_ = model_class(_snake_case)
model.gradient_checkpointing_enable()
model.to(_snake_case)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
UpperCAmelCase_ = model(**_snake_case).loss
loss.backward()
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_snake_case),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}"""):
UpperCAmelCase_ = problem_type['''title''']
UpperCAmelCase_ = problem_type['''num_labels''']
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.train()
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
if problem_type["num_labels"] > 1:
UpperCAmelCase_ = inputs['''labels'''].unsqueeze(1).repeat(1 , problem_type['''num_labels'''])
UpperCAmelCase_ = inputs['''labels'''].to(problem_type['''dtype'''])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_snake_case) as warning_list:
UpperCAmelCase_ = model(**_snake_case).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = LevitModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
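# e.g. random_input_ids(batch_size=2, sequence_length=8, vocab_size=30522) returns a
# tf.Tensor of shape (2, 8) of random token ids (illustrative call; values vary per run).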
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """
    Sort the list `a` in place.

    >>> a = [8, 3, 2, 7, 4, 6, 8]
    >>> pigeonhole_sort(a)
    >>> a
    [2, 3, 4, 6, 7, 8, 8]
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/tweak a ParlAI Blenderbot checkpoint into the HF Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
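# Example invocation (script name and paths are illustrative):
#   python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json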
def stooge_sort(arr: list) -> list:
    """
    >>> stooge_sort([18.1, 0, -7.1, -1, 2, 2])
    [-7.1, -1, 0, 2, 2, 18.1]
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)

        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental transformation applied to every pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
from manim import *
class snake_case_ ( __lowercase ):
def UpperCAmelCase__ ( self : Dict )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : str = Rectangle(height=0.5 , width=0.5 )
__lowerCAmelCase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Dict = [mem.copy() for i in range(6 )]
__lowerCAmelCase : List[Any] = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__lowerCAmelCase : int = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__lowerCAmelCase : List[str] = VGroup(_snake_case , _snake_case ).arrange(_snake_case , buff=0 )
__lowerCAmelCase : Optional[int] = Text("""CPU""" , font_size=24 )
__lowerCAmelCase : List[Any] = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_snake_case )
__lowerCAmelCase : Tuple = [mem.copy() for i in range(4 )]
__lowerCAmelCase : Union[str, Any] = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__lowerCAmelCase : Optional[Any] = Text("""GPU""" , font_size=24 )
__lowerCAmelCase : Optional[int] = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
gpu.move_to([-1, -1, 0] )
self.add(_snake_case )
__lowerCAmelCase : List[str] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : int = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__lowerCAmelCase : List[str] = Text("""Model""" , font_size=24 )
__lowerCAmelCase : Optional[Any] = Group(_snake_case , _snake_case ).arrange(_snake_case , buff=0.5 , aligned_edge=_snake_case )
model.move_to([3, -1.0, 0] )
self.add(_snake_case )
__lowerCAmelCase : int = []
for i, rect in enumerate(_snake_case ):
rect.set_stroke(_snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__lowerCAmelCase : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_snake_case , buff=0.0 )
self.add(_snake_case )
cpu_targs.append(_snake_case )
__lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(6 )]
__lowerCAmelCase : Optional[Any] = VGroup(*_snake_case ).arrange(_snake_case , buff=0 )
__lowerCAmelCase : str = Text("""Loaded Checkpoint""" , font_size=24 )
__lowerCAmelCase : str = Group(_snake_case , _snake_case ).arrange(_snake_case , aligned_edge=_snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__lowerCAmelCase : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCAmelCase : Union[str, Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_snake_case , _snake_case )
__lowerCAmelCase : int = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__lowerCAmelCase : Tuple = MarkupText(
F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_snake_case ) , Write(_snake_case ) )
self.play(Write(_snake_case , run_time=1 ) , Create(_snake_case , run_time=1 ) )
__lowerCAmelCase : str = []
__lowerCAmelCase : Union[str, Any] = []
for i, rect in enumerate(_snake_case ):
__lowerCAmelCase : Optional[int] = fill.copy().set_fill(_snake_case , opacity=0.7 )
target.move_to(_snake_case )
first_animations.append(GrowFromCenter(_snake_case , run_time=1 ) )
__lowerCAmelCase : Optional[Any] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_snake_case , run_time=1.5 ) )
self.play(*_snake_case )
self.play(*_snake_case )
self.wait()
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition `data` into elements less than, equal to, and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
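if __name__ == "__main__":
    # Minimal demo (added sketch): select the k-th smallest element, 0-indexed.
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 2))  # 5 (third-smallest)
    print(quick_select([2, 4, 5, 7, 899, 54, 32], 0))  # 2 (minimum)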
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
    '''summarization''': AutoModelForSeq2SeqLM,
    '''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument(
            "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
        parser.add_argument(
            "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", )
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # runs once before the sanity-check/validation loop starts
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=logger, val_check_interval=1, num_sanity_val_steps=2, **train_params, )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
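

# A minimal usage sketch (hypothetical task class; argument values are
# illustrative only): subclass BaseTransformer, implement get_dataloader(),
# then wire the argument parsers together and hand the model to generic_train.
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskTransformer(args), args)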
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    return round(tf * idf, 3)
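

# A small worked example (corpus and term are illustrative): "first" occurs in
# one of the two documents, so idf = round(log10(2 / 1), 3) = 0.301 and the
# tf-idf score is round(1 * 0.301, 3) = 0.301.
if __name__ == "__main__":
    corpus = "this is the first document\nthis is the second document"
    tf = term_frequency("first", "this is the first document")
    df, n = document_frequency("first", corpus)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # 0.301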
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
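

# A brief usage sketch (assumes the `transformers` package is installed; the
# values shown are the signature defaults above):
#
#   config = SegformerConfig(num_encoder_blocks=4, hidden_sizes=[32, 64, 160, 256])
#   print(config.decoder_hidden_size)  # 256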
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
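    # Example invocation via python-fire (paths and n are illustrative):
    #   python minify_dataset.py wmt_en_ro wmt_en_ro_min 25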
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
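

# A hedged usage sketch (model id and file name are illustrative; requires the
# `transformers` and `Pillow` packages):
#
#   from PIL import Image
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#   print(encoding.keys())  # input_ids, bbox, attention_mask, image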
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
                results.update(result)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
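
    # Example command line (paths and task name are illustrative):
    #   python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
    #       --data_dir ./swag --output_dir ./out --do_train --do_eval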
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
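

# A short usage sketch (assumes the `datasets` package; the label names are
# illustrative):
#
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
#   template = AudioClassification(audio_column="audio", label_column="labels")
#   template = template.align_with_features(features)
#   print(template.column_mapping)  # {'audio': 'audio', 'labels': 'labels'}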
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
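
    # A quick worked example (tree shape is arbitrary): the root holds all
    # three coins, so one coin must travel down each of the two edges.
    print(distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))))  # 2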
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """A wrapper around `tqdm.tqdm` that, by default, renders only on the local main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
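

# A minimal usage sketch (the iterable is illustrative). Note that
# `main_process_only` is the first positional parameter of this wrapper, so
# the iterable must be passed after it:
#
#   from accelerate.utils import tqdm
#   for _ in tqdm(True, range(100)):
#       pass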
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig


@require_tf
class TFAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)
    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a date string in mm-dd-yyyy or mm/dd/yyyy format."""

    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
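
    # Example (date is illustrative):
    #   zeller("01-31-2010") -> "Your date 01-31-2010, is a Sunday!"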
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the gravity/bead method."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
""" UniSpeechSat model configuration"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
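

# A brief usage sketch (assumes the `transformers` package is installed):
#
#   config = UniSpeechSatConfig()  # the defaults shown in the signature above
#   print(config.inputs_to_logits_ratio)  # 320, the product of the conv strides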
import os
def solution():
    """Find the greatest product of four adjacent numbers (in any direction) in the grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"  # weights file name inside each saved checkpoint (constant name reconstructed)
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."})
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."})
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."})


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."})
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."})
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
SCREAMING_SNAKE_CASE = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE = STModelArguments(model_name_or_path=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = STDataArguments(train_file=_UpperCAmelCase , infer_file=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = STTrainingArguments(output_dir=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_UpperCAmelCase).items():
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
for key, value in kwargs.items():
if hasattr(_UpperCAmelCase , _UpperCAmelCase):
setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        # MODEL_BIN_FILE is assumed to be defined earlier in this script as the
        # weight-file name inside a checkpoint directory (e.g. "pytorch_model.bin").
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
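

# A hedged sketch of driving the self-training loop above end to end. The checkpoint
# name and file paths are illustrative assumptions, not values from this script; any
# extra keyword arguments are forwarded to `finetune` through `arguments_dict`, as
# shown in the loop body.
if __name__ == "__main__":
    selftrain(
        model_name_or_path="bert-base-uncased",
        train_file="data/train.csv",
        infer_file="data/infer.csv",
        output_dir="output",
        eval_file="data/eval.csv",
        evaluation_strategy=IntervalStrategy.STEPS.value,
        max_selftrain_iterations=2,
    )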
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2  # e.g. (30 // 2) ** 2 = 225 patches
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
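

# A minimal smoke-test sketch of the forward pass the tests above exercise, using a
# deliberately tiny config. The sizes below are illustrative assumptions, not the
# tester defaults.
if __name__ == "__main__":
    if is_flax_available():
        config = ViTConfig(
            image_size=32, patch_size=4, hidden_size=24, num_hidden_layers=2, num_attention_heads=4, intermediate_size=48
        )
        model = FlaxViTModel(config)
        outputs = model(np.ones((1, 3, 32, 32), dtype=np.float32))
        # (32 // 4) ** 2 = 64 patches, plus 1 for the [CLS] token => sequence length 65
        print(outputs.last_hidden_state.shape)  # (1, 65, 24)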
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    """Check that every config class docstring links at least one valid checkpoint."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
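
# For reference, what `_re_checkpoint` extracts from a typical config docstring.
# The sentence below is an illustrative assumption modeled on real docstrings:
#
#   >>> _re_checkpoint.findall(
#   ...     "instantiate a configuration with the defaults of the "
#   ...     "[bert-base-uncased](https://huggingface.co/bert-base-uncased) architecture."
#   ... )
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]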