| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
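#
# For reference, the expansion is a plain cartesian product; a minimal illustrative sketch
# (not part of the tool itself) of how the example above expands:
#
#   >>> import itertools
#   >>> dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   >>> [" ".join(v) for v in itertools.product(*dims)]
#   ['--tf32 0 --fp16 0', '--tf32 0 --fp16 1', '--tf32 0 --bf16 1',
#    '--tf32 1 --fp16 0', '--tf32 1 --fp16 1', '--tf32 1 --bf16 1']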
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
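#
# A sketch of the Diff % arithmetic (using the table above, where --tf32 0 at
# 285.11 samples/sec is the baseline):
#
#   >>> base, value = 285.11, 342.09
#   >>> round(100 * (value - base) / base)
#   20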
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
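#
# For reference, all_results.json is a flat JSON dict of metrics. A hypothetical run of the
# example above might produce something like (the exact keys depend on the benchmarked script):
#
#   {"train_loss": 2.51, "train_samples_per_second": 285.11, "train_runtime": 70.2}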
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string that can be replayed nicely, wrapped for the given width.

    Args:
        max_width: the width to wrap for, defaults to 80
        full_python_path: whether to replicate the full path of the python executable or just the last part (i.e. `python`)
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # switch to `if 1:` to emulate runs with random metrics, without actually benchmarking
    # (handy when debugging this script itself)
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)

if __name__ == "__main__":
main()
| 582
|
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Wraps a pre-tokenized corpus of token-id sequences for LM distillation.

    Input:
        params: `NameSpace` parameters
        data: the tokenized data, a list of token-id sequences
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 582
| 1
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 715
|
def solution(limit: int = 1_000_000) -> int:
    """Count the reduced proper fractions with denominator <= limit, i.e. the sum of
    Euler's totient phi(d) for d in [2, limit] (Project Euler Problem 72)."""
    phi = [i - 1 for i in range(limit + 1)]  # phi(p) = p - 1 holds for primes

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
| 373
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 667
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # take items in decreasing order of key_func while they still fit the budget
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 667
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,  # [ 32x32 ] patches
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy pixel inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 195
|
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 195
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a :List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Any = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :int = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Tuple = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :int = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
a :Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680
|
"""simple docstring"""
__A : Optional[int] = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 602
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> tuple:
"""simple docstring"""
__UpperCAmelCase : int = namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 487
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = LDMTextToImagePipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
lowercase_ = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = False
def a_ ( self : Union[str, Any]):
"""simple docstring"""
torch.manual_seed(0)
__UpperCAmelCase : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__UpperCAmelCase : Dict = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , )
torch.manual_seed(0)
__UpperCAmelCase : Optional[int] = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0)
__UpperCAmelCase : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__UpperCAmelCase : List[str] = CLIPTextModel(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
__UpperCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def a_ ( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple=0):
"""simple docstring"""
if str(UpperCamelCase_).startswith("mps"):
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
else:
__UpperCAmelCase : Tuple = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_)
__UpperCAmelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = LDMTextToImagePipeline(**UpperCamelCase_)
pipe.to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : int = self.get_dummy_inputs(UpperCamelCase_)
__UpperCAmelCase : Dict = pipe(**UpperCamelCase_).images
__UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__UpperCAmelCase : List[str] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : int=torch.floataa , UpperCamelCase_ : Dict=0):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = np.random.RandomState(UpperCamelCase_).standard_normal((1, 4, 32, 32))
__UpperCAmelCase : str = torch.from_numpy(UpperCamelCase_).to(device=UpperCamelCase_ , dtype=UpperCamelCase_)
__UpperCAmelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Optional[int] = self.get_inputs(UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = pipe(**UpperCamelCase_).images
__UpperCAmelCase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__UpperCAmelCase : List[Any] = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
__UpperCAmelCase : List[Any] = np.abs(expected_slice - image_slice).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
def a_ ( self : Optional[Any]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=torch.floataa , UpperCamelCase_ : Optional[int]=0):
"""simple docstring"""
__UpperCAmelCase : str = torch.manual_seed(UpperCamelCase_)
__UpperCAmelCase : Tuple = np.random.RandomState(UpperCamelCase_).standard_normal((1, 4, 32, 32))
__UpperCAmelCase : Optional[Any] = torch.from_numpy(UpperCamelCase_).to(device=UpperCamelCase_ , dtype=UpperCamelCase_)
__UpperCAmelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : str = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(UpperCamelCase_)
pipe.set_progress_bar_config(disable=UpperCamelCase_)
__UpperCAmelCase : Union[str, Any] = self.get_inputs(UpperCamelCase_)
__UpperCAmelCase : List[str] = pipe(**UpperCamelCase_).images[0]
__UpperCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
__UpperCAmelCase : int = np.abs(expected_image - image).max()
assert max_diff < 1e-3
| 487
| 1
|
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 6
|
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 106
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCamelCase = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_UpperCamelCase = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_UpperCamelCase = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def __lowercase ( self :Union[str, Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __lowercase ( self :List[Any] , __lowercase :str , __lowercase :Optional[int] ):
__lowerCamelCase : Optional[int] ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
__lowerCamelCase : Dict =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
__lowerCamelCase : List[str] =evaluate(dataset=__lowercase , predictions=__lowercase )
return score
| 700
|
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Tuple , __lowercase :Any ):
__lowerCamelCase : str =data
__lowerCamelCase : Any =None
def __repr__( self :Optional[Any] ):
return f'Node({self.data})'
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Optional[int] ):
__lowerCamelCase : Union[str, Any] =None
def __iter__( self :Optional[int] ):
__lowerCamelCase : Optional[Any] =self.head
while node:
yield node.data
__lowerCamelCase : str =node.next
def __len__( self :Any ):
return sum(1 for _ in self )
def __repr__( self :Any ):
return "->".join([str(__lowercase ) for item in self] )
def __getitem__( self :Dict , __lowercase :int ):
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self :Dict , __lowercase :int , __lowercase :Any ):
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
__lowerCamelCase : Optional[Any] =self.head
for _ in range(__lowercase ):
__lowerCamelCase : List[str] =current.next
__lowerCamelCase : Optional[int] =data
def __lowercase ( self :Tuple , __lowercase :Any ):
self.insert_nth(len(self ) , __lowercase )
def __lowercase ( self :int , __lowercase :Any ):
self.insert_nth(0 , __lowercase )
def __lowercase ( self :Dict , __lowercase :int , __lowercase :Any ):
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
__lowerCamelCase : List[Any] =Node(__lowercase )
if self.head is None:
__lowerCamelCase : List[Any] =new_node
elif index == 0:
__lowerCamelCase : Optional[Any] =self.head # link new_node to head
__lowerCamelCase : Optional[Any] =new_node
else:
__lowerCamelCase : str =self.head
for _ in range(index - 1 ):
__lowerCamelCase : Any =temp.next
__lowerCamelCase : List[Any] =temp.next
__lowerCamelCase : List[Any] =new_node
def __lowercase ( self :Tuple ): # print every node data
print(self )
def __lowercase ( self :List[str] ):
return self.delete_nth(0 )
def __lowercase ( self :Any ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self :Dict , __lowercase :int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
__lowerCamelCase : List[Any] =self.head # default first node
if index == 0:
__lowerCamelCase : int =self.head.next
else:
__lowerCamelCase : int =self.head
for _ in range(index - 1 ):
__lowerCamelCase : str =temp.next
__lowerCamelCase : int =temp.next
__lowerCamelCase : Optional[Any] =temp.next.next
return delete_node.data
def __lowercase ( self :Any ):
return self.head is None
def __lowercase ( self :Tuple ):
__lowerCamelCase : Any =None
__lowerCamelCase : Optional[int] =self.head
while current:
# Store the current node's next node.
__lowerCamelCase : Any =current.next
# Make the current node's next point backwards
__lowerCamelCase : Optional[Any] =prev
# Make the previous node be the current node
__lowerCamelCase : List[str] =current
# Make the current node the next node (to progress iteration)
__lowerCamelCase : Tuple =next_node
# Return prev in order to put the head at the end
__lowerCamelCase : List[str] =prev
def lowerCAmelCase_ ( ):
'''simple docstring'''
__lowerCamelCase : Tuple =LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__lowerCamelCase : List[Any] =-i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def lowerCAmelCase_ ( ):
'''simple docstring'''
__lowerCamelCase : Any =[
-9,
100,
Node(77345112 ),
'''dlrow olleH''',
7,
5555,
0,
-192.55_555,
'''Hello, world!''',
77.9,
Node(10 ),
None,
None,
12.20,
]
__lowerCamelCase : str =LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__lowerCamelCase : str =linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__lowerCamelCase : Union[str, Any] =linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__lowerCamelCase : Union[str, Any] =linked_list.delete_nth(10 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowerCAmelCase_ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
__lowerCamelCase : Optional[Any] =LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
print(SCREAMING_SNAKE_CASE )
print('''\nReading/changing Node data using indexing:''' )
print(F'Element at Position 1: {linked_list[1]}' )
__lowerCamelCase : Any =input('''Enter New Value: ''' ).strip()
print('''New list:''' )
print(SCREAMING_SNAKE_CASE )
print(F'length of linked_list is : {len(SCREAMING_SNAKE_CASE )}' )
if __name__ == "__main__":
main()
| 363
| 0
|
'''simple docstring'''
def UpperCamelCase ( lowercase_ : List[Any] , lowercase_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72
|
from math import sqrt
def lowerCamelCase_ ( UpperCamelCase_ = 100_0000 ):
_a : int = 0
_a : int = 0
_a : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(UpperCamelCase_ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 471
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str]=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
SCREAMING_SNAKE_CASE__ = subparsers.add_parser("""test""" )
else:
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser("""Accelerate test command""" )
parser.add_argument(
"""--config_file""" , default=__UpperCamelCase , help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCamelCase )
return parser
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] )
if args.config_file is None:
SCREAMING_SNAKE_CASE__ = script_name
else:
SCREAMING_SNAKE_CASE__ = f"""--config_file={args.config_file} {script_name}"""
SCREAMING_SNAKE_CASE__ = ["""accelerate-launch"""] + test_args.split()
SCREAMING_SNAKE_CASE__ = execute_subprocess_async(__UpperCamelCase , env=os.environ.copy() )
if result.returncode == 0:
print("""Test is a success! You are ready for your distributed training!""" )
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = test_command_parser()
SCREAMING_SNAKE_CASE__ = parser.parse_args()
test_command(__UpperCamelCase )
if __name__ == "__main__":
main()
| 379
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = "nllb-moe"
lowerCAmelCase_ = ["past_key_values"]
lowerCAmelCase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , _lowercase : Tuple=12_81_12 , _lowercase : List[Any]=10_24 , _lowercase : Any=12 , _lowercase : List[Any]=40_96 , _lowercase : str=16 , _lowercase : str=12 , _lowercase : Optional[int]=40_96 , _lowercase : List[Any]=16 , _lowercase : str=0.05 , _lowercase : Tuple=0.05 , _lowercase : str=True , _lowercase : List[str]=True , _lowercase : Optional[Any]="relu" , _lowercase : str=10_24 , _lowercase : Tuple=0.1 , _lowercase : int=0.1 , _lowercase : Dict=0.0 , _lowercase : List[str]=0.02 , _lowercase : int=2 , _lowercase : Optional[Any]=True , _lowercase : List[Any]=False , _lowercase : List[str]="float32" , _lowercase : Optional[Any]=False , _lowercase : str=1_28 , _lowercase : int=64 , _lowercase : Optional[int]=4 , _lowercase : List[str]=4 , _lowercase : Union[str, Any]=0.0_01 , _lowercase : List[Any]=0.0_01 , _lowercase : List[str]="all" , _lowercase : Optional[Any]=False , _lowercase : int=False , _lowercase : Tuple=1.0 , _lowercase : Optional[int]=0.2 , _lowercase : Optional[int]=1 , _lowercase : List[Any]=0 , _lowercase : List[Any]=2 , _lowercase : int=False , **_lowercase : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = d_model
SCREAMING_SNAKE_CASE__ = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = encoder_attention_heads
SCREAMING_SNAKE_CASE__ = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ = decoder_layers
SCREAMING_SNAKE_CASE__ = decoder_attention_heads
SCREAMING_SNAKE_CASE__ = dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = activation_function
SCREAMING_SNAKE_CASE__ = init_std
SCREAMING_SNAKE_CASE__ = encoder_layerdrop
SCREAMING_SNAKE_CASE__ = decoder_layerdrop
SCREAMING_SNAKE_CASE__ = use_cache
SCREAMING_SNAKE_CASE__ = encoder_layers
SCREAMING_SNAKE_CASE__ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ = router_z_loss_coef
SCREAMING_SNAKE_CASE__ = router_aux_loss_coef
SCREAMING_SNAKE_CASE__ = decoder_sparse_step
SCREAMING_SNAKE_CASE__ = encoder_sparse_step
SCREAMING_SNAKE_CASE__ = num_experts
SCREAMING_SNAKE_CASE__ = expert_capacity
SCREAMING_SNAKE_CASE__ = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
SCREAMING_SNAKE_CASE__ = router_dtype
SCREAMING_SNAKE_CASE__ = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE__ = batch_prioritized_routing
SCREAMING_SNAKE_CASE__ = second_expert_policy
SCREAMING_SNAKE_CASE__ = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE__ = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE__ = moe_token_dropout
SCREAMING_SNAKE_CASE__ = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 379
| 1
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def _lowerCamelCase ( __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
def wrapper(*__lowerCamelCase , **__lowerCamelCase ):
UpperCAmelCase__ : str = timeit.default_timer()
UpperCAmelCase__ : Union[str, Any] = func(*__lowerCamelCase , **__lowerCamelCase )
UpperCAmelCase__ : int = timeit.default_timer() - starttime
return delta
UpperCAmelCase__ : Dict = func.__name__
return wrapper
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase=100 , __lowerCamelCase=None ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = seq_shapes or {}
for i in range(__lowerCamelCase ):
UpperCAmelCase__ : Optional[int] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__lowerCamelCase , _ArrayXD ):
UpperCAmelCase__ : Tuple = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__lowerCamelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase__ : Optional[int] = """The small grey turtle was surprisingly fast when challenged."""
else:
UpperCAmelCase__ : Union[str, Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__lowerCamelCase , datasets.Sequence ):
while isinstance(__lowerCamelCase , datasets.Sequence ):
UpperCAmelCase__ : str = v.feature
UpperCAmelCase__ : str = seq_shapes[k]
UpperCAmelCase__ : Any = np.random.rand(*__lowerCamelCase ).astype(v.dtype )
UpperCAmelCase__ : Any = data
dummy_data.append((i, example) )
return dummy_data
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=100 , __lowerCamelCase=None ) -> str:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = generate_examples(__lowerCamelCase , num_examples=__lowerCamelCase , seq_shapes=__lowerCamelCase )
with ArrowWriter(features=__lowerCamelCase , path=__lowerCamelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase__ : List[Any] = features.encode_example(__lowerCamelCase )
writer.write(__lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ : str = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
UpperCAmelCase__ : Any = datasets.Dataset.from_file(filename=__lowerCamelCase , info=datasets.DatasetInfo(features=__lowerCamelCase ) )
return dataset
| 79
|
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __A( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(SCREAMING_SNAKE_CASE_ )
# standard deviation of the initial noise distribution
UpperCamelCase__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
UpperCamelCase__ = 4
# running values
UpperCamelCase__ = []
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase__ = num_inference_steps
UpperCamelCase__ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
UpperCamelCase__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
UpperCamelCase__ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
UpperCamelCase__ = torch.sin(steps * math.pi / 2 ) ** 2
UpperCamelCase__ = (1.0 - self.betas**2) ** 0.5
UpperCamelCase__ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
UpperCamelCase__ = timesteps.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
UpperCamelCase__ = (self.timesteps == timestep).nonzero().item()
UpperCamelCase__ = timestep_index + 1
UpperCamelCase__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(SCREAMING_SNAKE_CASE_ )
if len(self.ets ) == 1:
UpperCamelCase__ = self.ets[-1]
elif len(self.ets ) == 2:
UpperCamelCase__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
UpperCamelCase__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
UpperCamelCase__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
UpperCamelCase__ = self._get_prev_sample(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return sample
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.alphas[timestep_index]
UpperCamelCase__ = self.betas[timestep_index]
UpperCamelCase__ = self.alphas[prev_timestep_index]
UpperCamelCase__ = self.betas[prev_timestep_index]
UpperCamelCase__ = (sample - sigma * ets) / max(SCREAMING_SNAKE_CASE_ , 1E-8 )
UpperCamelCase__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__(self ):
return self.config.num_train_timesteps
| 513
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] ) -> List[str]:
# Initialise PyTorch model
lowerCamelCase_ = RemBertConfig.from_json_file(_lowerCamelCase )
print('Building PyTorch model from configuration: {}'.format(str(_lowerCamelCase ) ) )
lowerCamelCase_ = RemBertModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(_lowerCamelCase ) )
torch.save(model.state_dict() , _lowerCamelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : str = {
'''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''],
'''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''],
'''processing_mctct''': ['''MCTCTProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : int = [
'''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MCTCTForCTC''',
'''MCTCTModel''',
'''MCTCTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 137
| 0
|
def _lowerCAmelCase ( __magic_name__ :list , __magic_name__ :list , __magic_name__ :int ):
if len(__magic_name__ ) != len(__magic_name__ ):
raise ValueError('''The length of profit and weight must be same.''' )
if max_weight <= 0:
raise ValueError('''max_weight must greater than zero.''' )
if any(p < 0 for p in profit ):
raise ValueError('''Profit can not be negative.''' )
if any(w < 0 for w in weight ):
raise ValueError('''Weight can not be negative.''' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
UpperCAmelCase_ = [p / w for p, w in zip(__magic_name__ , __magic_name__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
UpperCAmelCase_ = sorted(__magic_name__ )
# declaring useful variables
UpperCAmelCase_ = len(__magic_name__ )
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
UpperCAmelCase_ = sorted_profit_by_weight[length - i - 1]
UpperCAmelCase_ = profit_by_weight.index(__magic_name__ )
UpperCAmelCase_ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
_lowerCamelCase : Dict = [int(x) for x in input('Input profits separated by spaces: ').split()]
_lowerCamelCase : Union[str, Any] = [int(x) for x in input('Input weights separated by spaces: ').split()]
_lowerCamelCase : Union[str, Any] = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 121
|
def _lowerCAmelCase ( __magic_name__ :list ):
if any(not isinstance(__magic_name__ , __magic_name__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
for _ in range(len(__magic_name__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__magic_name__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 121
| 1
|
"""simple docstring"""
_lowerCAmelCase : Tuple = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
_lowerCAmelCase : Optional[Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list[int]:
'''simple docstring'''
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
order.append(_lowerCamelCase )
return order
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> list[int]:
'''simple docstring'''
_lowerCamelCase : Any = True
_lowerCamelCase : Optional[int] = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return component
def lowerCamelCase_( _lowerCamelCase ) -> list[list[int]]:
'''simple docstring'''
_lowerCamelCase : List[str] = len(_lowerCamelCase ) * [False]
_lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(_lowerCamelCase ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(_lowerCamelCase )
_lowerCamelCase : int = []
for i, was_visited in enumerate(_lowerCamelCase ):
if not was_visited:
order += topology_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCamelCase : List[Any] = []
_lowerCamelCase : Optional[int] = len(_lowerCamelCase ) * [False]
for i in range(len(_lowerCamelCase ) ):
_lowerCamelCase : int = order[len(_lowerCamelCase ) - i - 1]
if not visited[vert]:
_lowerCamelCase : str = find_components(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
components_list.append(_lowerCamelCase )
return components_list
| 386
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_lowerCAmelCase : Tuple = datasets.utils.logging.get_logger(__name__)
_lowerCAmelCase : str = ['''names''', '''prefix''']
_lowerCAmelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_lowerCAmelCase : Optional[int] = ['''encoding_errors''', '''on_bad_lines''']
_lowerCAmelCase : Any = ['''date_format''']
@dataclass
class A_ ( datasets.BuilderConfig ):
lowerCAmelCase__ = ","
lowerCAmelCase__ = None
lowerCAmelCase__ = "infer"
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = False
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = None
lowerCAmelCase__ = "."
lowerCAmelCase__ = None
lowerCAmelCase__ = '"'
lowerCAmelCase__ = 0
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = 0
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = None
lowerCAmelCase__ = 1_0_0_0_0
lowerCAmelCase__ = None
lowerCAmelCase__ = "strict"
lowerCAmelCase__ = "error"
lowerCAmelCase__ = None
def _lowercase ( self: Tuple ):
'''simple docstring'''
if self.delimiter is not None:
_lowerCamelCase : int = self.delimiter
if self.column_names is not None:
_lowerCamelCase : Optional[Any] = self.column_names
@property
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,__lowerCAmelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class A_ ( datasets.ArrowBasedBuilder ):
lowerCAmelCase__ = CsvConfig
def _lowercase ( self: str ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
_lowerCamelCase : str = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCAmelCase ,(str, list, tuple) ):
_lowerCamelCase : Tuple = data_files
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = [files]
_lowerCamelCase : Tuple = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={"files": files} )]
_lowerCamelCase : Dict = []
for split_name, files in data_files.items():
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Any = [files]
_lowerCamelCase : Dict = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCAmelCase ,gen_kwargs={"files": files} ) )
return splits
def _lowercase ( self: List[str] ,__lowerCAmelCase: pa.Table ):
'''simple docstring'''
if self.config.features is not None:
_lowerCamelCase : Union[str, Any] = self.config.features.arrow_schema
if all(not require_storage_cast(__lowerCAmelCase ) for feature in self.config.features.values() ):
# cheaper cast
_lowerCamelCase : Union[str, Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=__lowerCAmelCase )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_lowerCamelCase : Optional[int] = table_cast(__lowerCAmelCase ,__lowerCAmelCase )
return pa_table
def _lowercase ( self: List[str] ,__lowerCAmelCase: Any ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_lowerCamelCase : str = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__lowerCAmelCase ) else object
for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase ) ):
_lowerCamelCase : Optional[Any] = pd.read_csv(__lowerCAmelCase ,iterator=__lowerCAmelCase ,dtype=__lowerCAmelCase ,**self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(__lowerCAmelCase ):
_lowerCamelCase : Dict = pa.Table.from_pandas(__lowerCAmelCase )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCAmelCase )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCAmelCase )}: {e}""" )
raise
| 386
| 1
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'deta'
__UpperCAmelCase : str = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , _a=None , _a=900 , _a=2_048 , _a=6 , _a=2_048 , _a=8 , _a=6 , _a=1_024 , _a=8 , _a=0.0 , _a=True , _a="relu" , _a=256 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=1.0 , _a=True , _a=False , _a="sine" , _a=5 , _a=4 , _a=4 , _a=True , _a=300 , _a=True , _a=True , _a=1 , _a=5 , _a=2 , _a=1 , _a=1 , _a=5 , _a=2 , _a=0.1 , _a=0.25 , **_a , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
__a = CONFIG_MAPPING["resnet"](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_a , _a ):
__a = backbone_config.pop('''model_type''' )
__a = CONFIG_MAPPING[backbone_model_type]
__a = config_class.from_dict(_a )
__a = backbone_config
__a = num_queries
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = init_xavier_std
__a = encoder_layerdrop
__a = auxiliary_loss
__a = position_embedding_type
# deformable attributes
__a = num_feature_levels
__a = encoder_n_points
__a = decoder_n_points
__a = two_stage
__a = two_stage_num_proposals
__a = with_box_refine
__a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = mask_loss_coefficient
__a = dice_loss_coefficient
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
__a = focal_alpha
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __UpperCAmelCase ( self ):
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self ):
return self.d_model
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.backbone_config.to_dict()
__a = self.__class__.model_type
return output
| 695
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
UpperCamelCase__ = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
lowercase_ : str = bs[:]
lowercase_ : Dict = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCamelCase )
cs.append(2**8 + n )
n += 1
lowercase_ : Optional[int] = [chr(_UpperCamelCase ) for n in cs]
return dict(zip(_UpperCamelCase , _UpperCamelCase ) )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = set()
lowercase_ : Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ : Dict = char
return pairs
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = VOCAB_FILES_NAMES
__lowerCamelCase: int = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: str = ['input_ids', 'attention_mask']
def __init__( self : Tuple , a : Tuple , a : Tuple , a : int="replace" , a : Optional[int]="<s>" , a : Tuple="</s>" , a : Tuple="</s>" , a : Tuple="<s>" , a : Optional[Any]="<unk>" , a : Dict="<pad>" , a : List[str]="<mask>" , a : Tuple=False , **a : Optional[int] , ):
'''simple docstring'''
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
lowercase_ : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
lowercase_ : Any = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
lowercase_ : Dict = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
lowercase_ : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , )
with open(a , encoding="utf-8" ) as vocab_handle:
lowercase_ : Any = json.load(a )
lowercase_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
lowercase_ : Dict = errors # how to handle errors in decoding
lowercase_ : Any = bytes_to_unicode()
lowercase_ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
lowercase_ : Optional[Any] = merges_handle.read().split("\n" )[1:-1]
lowercase_ : Any = [tuple(merge.split() ) for merge in bpe_merges]
lowercase_ : List[str] = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Optional[Any] = {}
lowercase_ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase_ : str = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self : Any , a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase_ : Optional[Any] = tuple(a )
lowercase_ : Tuple = get_pairs(a )
if not pairs:
return token
while True:
lowercase_ : Any = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ : Dict = bigram
lowercase_ : List[Any] = []
lowercase_ : Optional[Any] = 0
while i < len(a ):
try:
lowercase_ : Union[str, Any] = word.index(a , a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ : Any = j
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ : Any = tuple(a )
lowercase_ : List[str] = new_word
if len(a ) == 1:
break
else:
lowercase_ : Union[str, Any] = get_pairs(a )
lowercase_ : List[str] = " ".join(a )
lowercase_ : Optional[int] = word
return word
def lowerCAmelCase__ ( self : Any , a : str ):
'''simple docstring'''
lowercase_ : Dict = []
for token in re.findall(self.pat , a ):
lowercase_ : Tuple = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(" " ) )
return bpe_tokens
def lowerCAmelCase__ ( self : Tuple , a : Dict ):
'''simple docstring'''
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self : Tuple , a : str ):
'''simple docstring'''
return self.decoder.get(a )
def lowerCAmelCase__ ( self : int , a : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = "".join(a )
lowercase_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase__ ( self : List[str] , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase_ : Any = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : Optional[int] = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
lowercase_ : Dict = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowercase_ : Optional[Any] = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase__ ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
lowercase_ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : int , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1]
def lowerCAmelCase__ ( self : Optional[int] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ : Union[str, Any] = [self.sep_token_id]
lowercase_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase__ ( self : str , a : Any , a : int=False , **a : List[Any] ):
'''simple docstring'''
lowercase_ : Any = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()):
lowercase_ : str = " " + text
return (text, kwargs)
| 620
| 0
|
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_snake_case : Optional[int] = logging.get_logger(__name__)
_snake_case : Optional[int] = 'T5Config'
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''mt5'''
UpperCamelCase = MTaConfig
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''mt5'''
UpperCamelCase = MTaConfig
class _UpperCAmelCase ( lowercase_ ):
UpperCamelCase = '''mt5'''
UpperCamelCase = MTaConfig
| 524
|
"""simple docstring"""
def A__ ( UpperCamelCase , UpperCamelCase ):
if mass < 0:
raise ValueError("The mass of a body cannot be negative" )
return 0.5 * mass * abs(UpperCamelCase ) * abs(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 524
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41
|
'''simple docstring'''
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = (boundary[1] - boundary[0]) / steps
__SCREAMING_SNAKE_CASE : Union[str, Any] = boundary[0]
__SCREAMING_SNAKE_CASE : List[str] = boundary[1]
__SCREAMING_SNAKE_CASE : str = make_points(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Any = 0.0
y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE )
for i in x_i:
# print(i)
y += h * f(_SCREAMING_SNAKE_CASE )
y += (h / 2.0) * f(_SCREAMING_SNAKE_CASE )
return y
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = a + h
while x < (b - h):
yield x
__SCREAMING_SNAKE_CASE : Optional[int] = x + h
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] ): # enter your function here
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = (x - 0) * (x - 0)
return y
def __A ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = 0.0 # Lower bound of integration
__SCREAMING_SNAKE_CASE : Optional[int] = 1.0 # Upper bound of integration
__SCREAMING_SNAKE_CASE : Tuple = 1_0.0 # define number of steps or resolution
__SCREAMING_SNAKE_CASE : List[Any] = [a, b] # define boundary of integration
__SCREAMING_SNAKE_CASE : List[str] = method_a(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(f'y = {y}' )
if __name__ == "__main__":
main()
| 211
| 0
|
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def __init__( self ) -> Dict:
self.test()
def _UpperCamelCase ( self ) -> str:
snake_case_ = 0
snake_case_ = False
while not completed:
if counter == 1:
self.reset()
snake_case_ = self.advance()
if not self.does_advance(a ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
snake_case_ , snake_case_ , snake_case_ = self.update(a )
counter += 1
if counter > 1_00_00:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def _UpperCamelCase ( self ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _UpperCamelCase ( self , a ) -> int:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _UpperCamelCase ( self , a ) -> Tuple:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _UpperCamelCase ( self ) -> Any:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _UpperCamelCase ( self ) -> Dict:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def _UpperCamelCase ( self , a=False ) -> Optional[int]:
raise NotImplementedError(
F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , a ) -> List[str]:
super(a , self ).__init__()
if not isinstance(a , a ) or len(a ) == 0:
raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(a , a ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
snake_case_ = token_ids
snake_case_ = len(self.token_ids )
snake_case_ = -1 # the index of the currently fulfilled step
snake_case_ = False
def _UpperCamelCase ( self ) -> Dict:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def _UpperCamelCase ( self , a ) -> List[str]:
if not isinstance(a , a ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(a )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def _UpperCamelCase ( self , a ) -> Optional[int]:
if not isinstance(a , a ):
raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(a )}''' )
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(a ):
self.fulfilled_idx += 1
snake_case_ = True
if self.fulfilled_idx == (self.seqlen - 1):
snake_case_ = True
snake_case_ = completed
else:
# failed to make progress.
snake_case_ = True
self.reset()
return stepped, completed, reset
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = False
snake_case_ = 0
def _UpperCamelCase ( self ) -> List[str]:
return self.seqlen - (self.fulfilled_idx + 1)
def _UpperCamelCase ( self , a=False ) -> int:
snake_case_ = PhrasalConstraint(self.token_ids )
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.fulfilled_idx
snake_case_ = self.completed
return new_constraint
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , a , a=True ) -> Optional[int]:
snake_case_ = max([len(a ) for one in nested_token_ids] )
snake_case_ = {}
for token_ids in nested_token_ids:
snake_case_ = root
for tidx, token_id in enumerate(a ):
if token_id not in level:
snake_case_ = {}
snake_case_ = level[token_id]
if no_subsets and self.has_subsets(a , a ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F''' {nested_token_ids}.''' )
snake_case_ = root
def _UpperCamelCase ( self , a ) -> List[Any]:
snake_case_ = self.trie
for current_token in current_seq:
snake_case_ = start[current_token]
snake_case_ = list(start.keys() )
return next_tokens
def _UpperCamelCase ( self , a ) -> Tuple:
snake_case_ = self.next_tokens(a )
return len(a ) == 0
def _UpperCamelCase ( self , a ) -> Optional[int]:
snake_case_ = list(root.values() )
if len(a ) == 0:
return 1
else:
return sum([self.count_leaves(a ) for nn in next_nodes] )
def _UpperCamelCase ( self , a , a ) -> Union[str, Any]:
snake_case_ = self.count_leaves(a )
return len(a ) != leaf_count
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , a ) -> int:
super(a , self ).__init__()
if not isinstance(a , a ) or len(a ) == 0:
raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(a , a ) for token_ids in nested_token_ids ):
raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(a , a ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
snake_case_ = DisjunctiveTrie(a )
snake_case_ = nested_token_ids
snake_case_ = self.trie.max_height
snake_case_ = []
snake_case_ = False
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = self.trie.next_tokens(self.current_seq )
if len(a ) == 0:
return None
else:
return token_list
def _UpperCamelCase ( self , a ) -> int:
if not isinstance(a , a ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(a )}''' )
snake_case_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def _UpperCamelCase ( self , a ) -> Tuple:
if not isinstance(a , a ):
raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(a )}''' )
snake_case_ = False
snake_case_ = False
snake_case_ = False
if self.does_advance(a ):
self.current_seq.append(a )
snake_case_ = True
else:
snake_case_ = True
self.reset()
snake_case_ = self.trie.reached_leaf(self.current_seq )
snake_case_ = completed
return stepped, completed, reset
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = False
snake_case_ = []
def _UpperCamelCase ( self ) -> str:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def _UpperCamelCase ( self , a=False ) -> Optional[Any]:
snake_case_ = DisjunctiveConstraint(self.token_ids )
if stateful:
snake_case_ = self.seqlen
snake_case_ = self.current_seq
snake_case_ = self.completed
return new_constraint
class ConstraintListState:
    r"""
    A class for beam scorers to track their progress through a list of constraints.
    """

    def __init__(self, constraints):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
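

# A minimal usage sketch of the classes above, stepping a disjunctive
# constraint by hand. The token ids here are illustrative only (they do not
# come from any real tokenizer), and the printed shapes depend on what
# `DisjunctiveTrie.next_tokens` returns in the surrounding module.
if __name__ == "__main__":
    constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
    print(constraint.advance())  # the allowed next tokens; both branches start with 1
    print(constraint.update(1))  # (stepped=True, completed=False, reset=False)
    print(constraint.update(4))  # the short branch [1, 4] is fulfilled -> completed=True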
| 706
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Tests a single complete example against all of the implemented `by_feature` scripts."""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 607
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
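
# A hedged usage sketch (not part of this module): with the optional
# dependencies installed, the pipelines re-exported above can be loaded from
# the authors' published checkpoint. The `text_to_image` call below follows
# the pipeline's documented interface; adjust if your diffusers version differs.
#
#     from diffusers import VersatileDiffusionPipeline
#     pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#     image = pipe.text_to_image("an astronaut riding a horse").images[0]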
| 579
|
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
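

# A minimal sketch of the single-replica accumulation pattern the first test
# above exercises: gradients are summed across calls and only applied once.
# This assumes TF is available; the gradient values are illustrative.
def _gradient_accumulation_sketch():
    accumulator = GradientAccumulator()
    variable = tf.Variable([1.0, 2.0])
    optimizer, _ = create_optimizer(5e-5, num_train_steps=10, num_warmup_steps=5)
    for gradient in ([0.1, 0.1], [0.2, 0.2]):
        accumulator([tf.constant(gradient)])  # accumulates; does not apply yet
    # accumulator.gradients now holds the summed gradients for `variable`
    optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))
    accumulator.reset()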
| 579
| 1
|
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Jaro-Winkler similarity between two strings (higher means more similar).
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot be matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
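
# Worked example: "hello" vs "world" share a single in-window character ("l"),
# giving jaro = (1/3) * (1/5 + 1/5 + 1/1) ~= 0.467, and with no common prefix
# the Winkler boost leaves the score unchanged:
#
#     jaro_winkler("hello", "world")   # ~0.4667
#     jaro_winkler("hello", "hello")   # 1.0 for identical strings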
| 720
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
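

# A short usage sketch (not part of the original file): instantiating the
# config with the defaults above and inspecting the ONNX input axes.
# `task="default"` is assumed to be one of the standard OnnxConfig tasks.
if __name__ == "__main__":
    config = RoFormerConfig()  # defaults: vocab_size=50000, hidden_size=768, ...
    onnx_config = RoFormerOnnxConfig(config, task="default")
    print(onnx_config.inputs)  # input_ids / attention_mask / token_type_ids with dynamic axes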
| 372
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a sequence-classification model and load the S3PRL projector/post-net weights."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build an audio-frame-classification (diarization) model and load the S3PRL linear head."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build an XVector model and load the S3PRL TDNN and utterance-level weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak an S3PRL downstream checkpoint into the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
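
# Example invocation (a sketch: the script file name and all paths are
# placeholders, and "facebook/wav2vec2-base" is just one possible base model):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_downstream.ckpt \
#         --model_dump_path ./converted_model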
| 77
|
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between volume units via cubic metres."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
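
# Worked examples: conversion goes through cubic metres, so
# value * from_.from_ gives cubic metres and * to.to gives the target unit.
#
#     convert_volume(4, "cubicmeter", "litre")      # 4 * 1 * 1000        == 4000.0
#     convert_volume(1, "litre", "gallon")          # 1 * 0.001 * 264.172 ~= 0.264
#     convert_volume(3, "kilolitre", "cubicmeter")  # 3 * 1 * 1           == 3.0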
| 77
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
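
# Usage note: because of `_LazyModule`, importing from this package only
# resolves the heavy submodules on first attribute access, e.g.
#
#     from transformers import FNetConfig   # cheap: no torch import yet
#     from transformers import FNetModel    # triggers loading `modeling_fnet`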
| 256
| 0
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """
    >>> test_singly_linked_list()
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """
    This section tests that the LinkedList works with objects.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 697
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
| 697
| 1
|
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-indexed position of the most significant set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
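
# Sanity check (a small sketch, not in the original): for non-negative
# integers, the position of the highest set bit equals the built-in
# `int.bit_length()`.
if __name__ == "__main__":
    for n in (0, 1, 8, 255, 1024):
        assert get_highest_set_bit_position(n) == n.bit_length()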
| 247
|
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law to the given values: exactly one of force, charge1,
    charge2, or distance must be 0, and the missing value is returned as a
    name/value pair.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
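
# Worked example (a small sketch, not in the original): two 1 uC charges 1 m
# apart, F = k * |q1*q2| / d**2 = 8.988e9 * 1e-12 / 1 ~= 8.988e-3 N.
if __name__ == "__main__":
    print(coulombs_law(force=0, charge1=1e-6, charge2=1e-6, distance=1))
    # -> {'force': 0.008988...}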
| 247
| 1
|
import numpy as np
SQUARE = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) coordinates of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) coordinates."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message` with the Bifid cipher ("j" is folded into "i")."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a Bifid-encoded `message`."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
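

# Round-trip sketch (not in the original): Bifid encoding is invertible for
# lowercase alphabetic input, with "j" folded into "i" by the 5x5 square above.
if __name__ == "__main__":
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    assert cipher.decode(encoded) == "testmessage"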
| 410
|
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in `sentence` that is longer than 4 characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
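
# Worked example: only words longer than 4 characters are reversed, so the
# demo string above decodes to readable English:
#
#     reverse_long_words("Hey wollef sroirraw")   # -> 'Hey fellow warriors'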
| 410
| 1
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
"""simple docstring"""
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = 0
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : str = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
SCREAMING_SNAKE_CASE : int = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[str] = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
SCREAMING_SNAKE_CASE : str = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) )
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPConfig()
# Create a dummy config file with image_proceesor_type
SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
SCREAMING_SNAKE_CASE : Any = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ ).to_dict()
config_dict.pop("""image_processor_type""" )
SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(**UpperCAmelCase_ )
# save in new folder
model_config.save_pretrained(UpperCAmelCase_ )
config.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE : List[str] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase_ , """clip-base is not a local folder and is not a valid model identifier""" ):
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(UpperCAmelCase_ , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" , UpperCAmelCase_ )
AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_ ):
AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : List[Any] = Path(UpperCAmelCase_ ) / """preprocessor_config.json"""
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(UpperCAmelCase_ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(UpperCAmelCase_ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(UpperCAmelCase_ , """w""" ) )
SCREAMING_SNAKE_CASE : Dict = CustomImageProcessor.from_pretrained(UpperCAmelCase_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoImageProcessor.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
class UpperCamelCase__ ( __UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = True
try:
AutoConfig.register("""custom""" , UpperCAmelCase_ )
AutoImageProcessor.register(UpperCAmelCase_ , UpperCAmelCase_ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=UpperCAmelCase_ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(UpperCAmelCase_ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 711
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = """Hello world! cécé herlolip"""

BertAbsConfig = namedtuple(
"""BertAbsConfig""",
[
"""temp_dir""",
"""large""",
"""use_bert_emb""",
"""finetune_bert""",
"""encoder""",
"""share_emb""",
"""max_pos""",
"""enc_layers""",
"""enc_hidden_size""",
"""enc_heads""",
"""enc_ff_size""",
"""enc_dropout""",
"""dec_layers""",
"""dec_hidden_size""",
"""dec_heads""",
"""dec_ff_size""",
"""dec_dropout""",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between model outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between generator outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--bertabs_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
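# Editor's note: the convert-then-verify pattern used above, generalized as a
# small hedged sketch in plain PyTorch (the model arguments are placeholders,
# not the BertAbs classes).
import torch


def transfer_and_verify(original, converted, example_input, atol=1e-3):
    """Copy weights from `original` into `converted` and check that the two
    forward passes agree up to `atol` on one example input."""
    converted.load_state_dict(original.state_dict())
    original.eval()
    converted.eval()
    with torch.no_grad():
        out_original = original(example_input)
        out_converted = converted(example_input)
    max_diff = torch.max(torch.abs(out_converted - out_original)).item()
    print(f"Maximum absolute difference between outputs: {max_diff:.2e}")
    if not torch.allclose(out_converted, out_original, atol=atol):
        raise ValueError("Outputs differ; the conversion is likely wrong.")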
| 79
| 0
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = tmp_path / """cache"""
UpperCamelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase : Dict = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Any = tmp_path / """cache"""
UpperCamelCase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCamelCase : Optional[int] = (
Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase : Tuple = ParquetDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = tmp_path / """cache"""
UpperCamelCase : Optional[int] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Union[str, Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : int = parquet_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = [parquet_path]
UpperCamelCase : List[str] = tmp_path / """cache"""
UpperCamelCase : Dict = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ):
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
UpperCamelCase : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = tmp_path / """cache"""
UpperCamelCase : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase : str = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Tuple = tmp_path / """cache"""
UpperCamelCase : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
UpperCamelCase : int = (
Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCamelCase : Union[str, Any] = ParquetDatasetReader({"""train""": parquet_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if split:
UpperCamelCase : Optional[int] = {split: parquet_path}
else:
UpperCamelCase : Any = """train"""
UpperCamelCase : str = {"""train""": parquet_path, """test""": parquet_path}
UpperCamelCase : Union[str, Any] = tmp_path / """cache"""
UpperCamelCase : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
UpperCamelCase : Optional[Any] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Any = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
UpperCamelCase : str = pq.ParquetFile(tmp_path / """foo.parquet""" )
UpperCamelCase : Optional[Any] = pf.read()
assert dataset.data.table == output_table
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Any = str(shared_datadir / """test_image_rgb.jpg""" )
UpperCamelCase : Any = {"""image""": [image_path]}
UpperCamelCase : List[Any] = Features({"""image""": Image()} )
UpperCamelCase : Optional[Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" )
assert writer.write() > 0
UpperCamelCase : Any = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) )
assert dataset.features == reloaded_dataset.features
UpperCamelCase : Dict = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""" )} ), None),
(Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
assert get_writer_batch_size(SCREAMING_SNAKE_CASE ) == expected
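# Editor's note: a minimal round-trip through the two helpers under test above
# (assumes a recent `datasets`; the file name is illustrative).
from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3]})
assert ParquetDatasetWriter(ds, "example.parquet").write() > 0  # returns amount written
reloaded = ParquetDatasetReader("example.parquet").read()
assert reloaded.column_names == ds.column_names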
| 102
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowercase__ : List[Any] = "examples/"
lowercase__ : Any = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowercase__ : str = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowercase__ : Optional[Any] = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    # If the introduction or the conclusion of the list change, the prompts may need to be updated.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
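# Editor's note: the version arithmetic implemented by pre_release_work /
# post_release_work above, isolated on a concrete example for illustration.
from packaging import version

v = version.parse("4.31.0.dev0")
release = v.base_version                         # "4.31.0"      (dev branch -> release)
patch = f"{v.major}.{v.minor}.{v.micro + 1}"     # "4.31.1"      (patch, from a released base)
next_dev = f"{v.major}.{v.minor + 1}.0.dev0"     # "4.32.0.dev0" (reopen development)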
| 390
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self : Union[str, Any] , snake_case : Optional[int] , snake_case : Any=13 , snake_case : List[Any]=32 , snake_case : int=2 , snake_case : List[str]=3 , snake_case : Optional[int]=16 , snake_case : Optional[Any]=[32, 64, 128] , snake_case : Any=[1, 2, 1] , snake_case : Optional[int]=[2, 2, 4] , snake_case : Union[str, Any]=2 , snake_case : Optional[int]=2.0 , snake_case : str=True , snake_case : Any=0.0 , snake_case : Any=0.0 , snake_case : str=0.1 , snake_case : Any="gelu" , snake_case : Any=False , snake_case : List[str]=True , snake_case : int=0.02 , snake_case : List[Any]=1e-5 , snake_case : Tuple=True , snake_case : Union[str, Any]=None , snake_case : Dict=True , snake_case : Any=10 , snake_case : List[Any]=8 , snake_case : str=["stage1", "stage2"] , snake_case : Dict=[1, 2] , ) -> Any:
'''simple docstring'''
__magic_name__ : Optional[Any] = parent
__magic_name__ : Union[str, Any] = batch_size
__magic_name__ : List[Any] = image_size
__magic_name__ : Union[str, Any] = patch_size
__magic_name__ : Optional[Any] = num_channels
__magic_name__ : Optional[Any] = embed_dim
__magic_name__ : Tuple = hidden_sizes
__magic_name__ : Union[str, Any] = depths
__magic_name__ : Any = num_heads
__magic_name__ : List[str] = window_size
__magic_name__ : List[Any] = mlp_ratio
__magic_name__ : Union[str, Any] = qkv_bias
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[str] = drop_path_rate
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : Any = use_absolute_embeddings
__magic_name__ : List[Any] = patch_norm
__magic_name__ : List[str] = layer_norm_eps
__magic_name__ : Optional[int] = initializer_range
__magic_name__ : List[Any] = is_training
__magic_name__ : Tuple = scope
__magic_name__ : List[str] = use_labels
__magic_name__ : Tuple = type_sequence_label_size
__magic_name__ : str = encoder_stride
__magic_name__ : Optional[Any] = out_features
__magic_name__ : Any = out_indices
def _UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__magic_name__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Tuple = None
if self.use_labels:
__magic_name__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Dict = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self : List[Any] , snake_case : Optional[Any] , snake_case : Dict , snake_case : List[Any] ) -> int:
'''simple docstring'''
__magic_name__ : Any = FocalNetModel(config=__a )
model.to(__a )
model.eval()
__magic_name__ : Tuple = model(__a )
__magic_name__ : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__magic_name__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self : Optional[int] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Optional[int] ) -> int:
'''simple docstring'''
__magic_name__ : Any = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
__magic_name__ : Tuple = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__magic_name__ : int = None
__magic_name__ : int = FocalNetBackbone(config=__a )
model.to(__a )
model.eval()
__magic_name__ : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCAmelCase ( self : Optional[Any] , snake_case : str , snake_case : Dict , snake_case : List[str] ) -> Dict:
'''simple docstring'''
__magic_name__ : Union[str, Any] = FocalNetForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
__magic_name__ : Dict = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Union[str, Any] = 1
__magic_name__ : Union[str, Any] = FocalNetForMaskedImageModeling(__a )
model.to(__a )
model.eval()
__magic_name__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Union[str, Any] = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCAmelCase ( self : Optional[Any] , snake_case : Dict , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : List[str] = self.type_sequence_label_size
__magic_name__ : Dict = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
__magic_name__ : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : int = 1
__magic_name__ : str = FocalNetForImageClassification(__a )
model.to(__a )
model.eval()
__magic_name__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : int = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case_ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ : List[str] = FocalNetModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
def _UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
return
def _UpperCAmelCase ( self : Tuple ) -> List[Any]:
'''simple docstring'''
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def _UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def _UpperCAmelCase ( self : List[Any] ) -> Tuple:
'''simple docstring'''
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def _UpperCAmelCase ( self : int ) -> str:
'''simple docstring'''
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def _UpperCAmelCase ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def _UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : List[Any] = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def _UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
__magic_name__ , __magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = model_class(__a )
__magic_name__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a )
def _UpperCAmelCase ( self : List[str] , snake_case : Any , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__magic_name__ : Any = model(**self._prepare_for_class(__a , __a ) )
__magic_name__ : Any = outputs.hidden_states
__magic_name__ : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__a ) , __a )
# FocalNet has a different seq_length
__magic_name__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__magic_name__ : str = outputs.reshaped_hidden_states
self.assertEqual(len(__a ) , __a )
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Dict = reshaped_hidden_states[0].shape
__magic_name__ : Optional[int] = (
reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__magic_name__ , __magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(__a , __a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Dict = True
self.check_hidden_states_output(__a , __a , __a , __a )
def _UpperCAmelCase ( self : Any ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Union[str, Any] = 3
__magic_name__ : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ : Tuple = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__magic_name__ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__magic_name__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__magic_name__ : Optional[int] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : List[str] = True
self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
@slow
def _UpperCAmelCase ( self : Optional[Any] ) -> str:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Any = FocalNetModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[Any] = _config_zero_init(__a )
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(config=__a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
__magic_name__ : Optional[int] = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(__a )
__magic_name__ : Tuple = self.default_image_processor
__magic_name__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__magic_name__ : str = image_processor(images=__a , return_tensors='''pt''' ).to(__a )
# forward pass
with torch.no_grad():
__magic_name__ : int = model(**__a )
# verify the logits
__magic_name__ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__magic_name__ : Optional[Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case_ = (FocalNetBackbone,) if is_torch_available() else ()
snake_case_ = FocalNetConfig
snake_case_ = False
def _UpperCAmelCase ( self : List[Any] ) -> int:
'''simple docstring'''
__magic_name__ : Tuple = FocalNetModelTester(self )
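# Editor's note: a minimal inference sketch matching the slow integration test
# above (checkpoint name and fixture path taken from the test itself).
import torch
from PIL import Image
from transformers import AutoImageProcessor, FocalNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # the test expects class id 281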
| 712
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
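# Editor's note: a sketch of why the dummy pattern above works. Because
# DummyObject is a metaclass, classmethod-style access on the dummy class is
# intercepted and funnelled into requires_backends, which raises an ImportError
# naming the missing dependency. Simplified re-implementation, for illustration
# only (the real DummyObject consults cls._backends).
class _DummyObject(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the `note_seq` library.")


class _MidiProcessor(metaclass=_DummyObject):
    pass


# _MidiProcessor.from_pretrained  ->  ImportError at attribute-access time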
| 147
| 0
|
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """Takes a state dict and a config, and returns a converted checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
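# Editor's note: the qkv split performed inside assign_to_checkpoint, shown on
# a toy bias tensor (shapes illustrative). Per head, the old checkpoint stores
# the query/key/value projections stacked in one tensor; diffusers wants three.
import torch

channels, num_heads = 12, 2
qkv_bias = torch.arange(3 * channels, dtype=torch.float32)
per_head = qkv_bias.reshape(num_heads, 3 * channels // num_heads)
query, key, value = (t.reshape(-1) for t in per_head.split(channels // num_heads, dim=1))
assert query.shape == key.shape == value.shape == (channels,)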
| 319
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")
    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)
    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
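# Editor's note: the seeded-input helper the mixin above leans on, shown in
# isolation. randn_tensor mirrors torch.randn but handles generator/device
# placement consistently (import path as used in this file).
import torch
from diffusers.utils import randn_tensor

generator = torch.manual_seed(0)
hidden_states = randn_tensor((4, 32, 32, 32), generator=generator, device=torch.device("cpu"))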
| 319
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
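# Editor's note: an instantiation sketch for the config above. The activation
# bookkeeping in __init__ turns "gated-gelu" into the gelu_new activation with
# gating enabled (illustration, assuming UMT5Config is exported as in recent
# transformers).
from transformers import UMT5Config

config = UMT5Config(feed_forward_proj="gated-gelu")
assert config.dense_act_fn == "gelu_new" and config.is_gated_act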
| 703
|
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def main() -> None:
    '''simple docstring'''
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
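# Example invocation (a sketch; the script filename is hypothetical). The input file
# must follow the DPR biencoder JSON layout with "question" and "positive_ctxs" fields
# assumed by main() above:
#
#     python parse_dpr_relevance_data.py \
#         --src_path biencoder-nq-dev.json \
#         --evaluation_set questions.txt \
#         --gold_data_path gold_titles.tsv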
| 386
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def read_txt_into_dict(filename):
    '''simple docstring'''
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    '''simple docstring'''
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
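# Example invocations (sketches; the script filename and all paths are hypothetical).
# Converting a pretrained (not fine-tuned) fairseq checkpoint vs. a CTC fine-tuned
# checkpoint with its fairseq dictionary:
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path ./wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base --not_finetuned
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path ./wav2vec_small_960h.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base-960h --dict_path ./dict.ltr.txt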
| 674
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientnet'''] = ['''EfficientNetImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientnet'''] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
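# Minimal sketch of what the lazy structure above buys (assumes torch and vision are
# installed; nothing below is part of this file). Attribute access is what actually
# triggers the submodule import:
#
#     from transformers.models import efficientnet
#     config = efficientnet.EfficientNetConfig()      # imports configuration_efficientnet lazily
#     model = efficientnet.EfficientNetModel(config)  # imports modeling_efficientnet lazily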
| 282
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_x_clip"""] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
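# Quick sanity-check sketch for the helper above (commented so it stays inert): a
# cosine schedule should yield positive betas capped at max_beta (0.999 by default).
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert bool(torch.all(betas > 0)) and bool(torch.all(betas <= 0.999))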
class HeunDiscreteScheduler( SchedulerMixin , ConfigMixin ):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 10_00,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: bool = False,
        clip_sample: bool = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='cosine')
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='exp')
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith('mps'):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`')

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
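# Illustrative denoising-loop sketch for the scheduler above. `model` is a stand-in
# noise predictor and the tensor shapes are arbitrary assumptions; only the scheduler
# calls mirror the API defined in this file.
#
#     scheduler = HeunDiscreteScheduler()
#     scheduler.set_timesteps(num_inference_steps=25)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = model(model_input, t)  # hypothetical UNet call
#         sample = scheduler.step(noise_pred, t, sample).prev_sample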
| 222
| 0
|
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        '''simple docstring'''
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        '''simple docstring'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 178
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        '''simple docstring'''
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
                f'Supported model types: {",".join(self.backbones_supported)}')

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f'Transformer Decoder {decoder_type} not supported, please use one of'
                    f' {",".join(self.decoders_supported)}')
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config, decoder_config, **kwargs):
        '''simple docstring'''
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
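# Illustrative usage sketch (hyper-parameters are arbitrary examples; SwinConfig and
# DetrConfig are imported at the top of this module):
#
#     config = MaskFormerConfig(mask_feature_size=128)   # Swin/DETR defaults kick in
#     config = MaskFormerConfig.from_backbone_and_decoder_configs(
#         backbone_config=SwinConfig(), decoder_config=DetrConfig()
#     )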
| 178
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script", type=str, help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ), )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    """simple docstring"""
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
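# Example invocation (a sketch; the launcher filename and training script are
# hypothetical). This spawns 8 TPU processes, each calling run_glue._mp_fn with the
# patched sys.argv built in main() above:
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased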
| 712
|
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor( DeiTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
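# Usage sketch: the deprecated alias still behaves like DeiTImageProcessor but warns
# (the model id below is an example, not part of this file):
#
#     extractor = DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#     # FutureWarning: The class DeiTFeatureExtractor is deprecated ...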
| 328
| 0
|
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    '''simple docstring'''
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
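# Worked example for the solver above (each equation is [a, b, c] for ax + by = c):
#
#     cramers_rule_2x2([2, 3, 13], [5, 1, 13])   # -> (2.0, 3.0)
#
# determinant = 2*1 - 5*3 = -13, determinant_x = 13*1 - 13*3 = -26,
# determinant_y = 2*13 - 5*13 = -39, so x = -26/-13 = 2.0 and y = -39/-13 = 3.0.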
| 537
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """simple docstring"""
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=F"Testing {model_class} with {problem_type['title']}" ):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"] )

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                F"Something is going wrong in the regression problem: intercepted {w.message}" )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class DeiTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """simple docstring"""
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 611
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """simple docstring"""
    if not postfix_notation:
        return 0

    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token) )

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
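# Worked example for the evaluator above: "2 3 + 4 *" in postfix is (2 + 3) * 4.
#
#     evaluate_postfix(["2", "3", "+", "4", "*"])   # -> 20
#
# Division truncates toward zero via the explicit correction in the loop, so
# evaluate_postfix(["-7", "2", "/"]) returns -3 rather than floor division's -4.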
| 508
|
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    """simple docstring"""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class _a :
'''simple docstring'''
    def get_vision_text_model(self, config, text_config):
        '''simple docstring'''
        pass

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pass

    def get_pretrained_model_and_inputs(self):
        '''simple docstring'''
        pass

    def assert_almost_equals(self, a, b, tol):
        '''simple docstring'''
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, F"Difference between torch and flax is {diff} (>= {tol})." )
def UpperCamelCase_ ( self, A, A, A, A, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxVisionTextDualEncoderModel(A )
SCREAMING_SNAKE_CASE : Any = model(input_ids=A, pixel_values=A, attention_mask=A )
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase_ ( self, A, A, A, A, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.get_vision_text_model(A, A )
SCREAMING_SNAKE_CASE : str = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A )
SCREAMING_SNAKE_CASE : str = model(input_ids=A, pixel_values=A, attention_mask=A )
self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim) )
def UpperCamelCase_ ( self, A, A, A, A, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.get_vision_text_model(A, A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**A )
SCREAMING_SNAKE_CASE : str = model(input_ids=A, pixel_values=A, attention_mask=A )
SCREAMING_SNAKE_CASE : Any = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(A )
SCREAMING_SNAKE_CASE : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = model(input_ids=A, pixel_values=A, attention_mask=A )
SCREAMING_SNAKE_CASE : int = after_output[0]
SCREAMING_SNAKE_CASE : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(A, 1E-3 )
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
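    # Worked example for the patch arithmetic above: image_size (30, 30) with patch_size
    # (2, 2) gives (30 // 2) * (30 // 2) = 225 patches, so seq_len = 225 + 1 = 226.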
    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4E-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4E-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4E-2)
    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)
    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
@is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config')
        text_config = config_inputs_dict.pop('text_config')
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)
@slow
    def test_real_model_save_load_from_pretrained(self):
        '''Round-trip a real pretrained checkpoint through save_pretrained/from_pretrained.'''
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1E-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1E-3))
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase__ : str = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase__ : Union[str, Any] = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase__ : List[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase__ : List[str] = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
lowerCamelCase__ : Union[str, Any] = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
lowerCamelCase__ : Optional[int] = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
lowerCamelCase__ : int = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase__ : str = np.expand_dims(test_image, axis=0)
lowerCamelCase__ : Optional[int] = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowerCamelCase__ : List[Any] = "Normal"
if result[0][0] == 1:
lowerCamelCase__ : Any = "Abnormality detected"
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    '''Configuration class for MarkupLM models.'''

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
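# Minimal usage sketch: `MarkupLMConfig()` reproduces the defaults above, e.g.
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#   config.tag_pad_id  # -> 216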
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url):
    """Fetch the raw video bytes for an Instagram Video/IGTV url via downloadgram."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx: int = -1):
        """simple docstring"""
        self.label_idx = label_idx
def lowercase_ ( self :Optional[int] ,__UpperCAmelCase :Union[str, Any] ,__UpperCAmelCase :Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCamelCase__ : List[Any] = mode.value
lowerCamelCase__ : Dict = os.path.join(__UpperCAmelCase ,F"""{mode}.txt""" )
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : int = []
with open(__UpperCAmelCase ,encoding='''utf-8''' ) as f:
lowerCamelCase__ : str = []
lowerCamelCase__ : Any = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=__UpperCAmelCase ,labels=__UpperCAmelCase ) )
guid_index += 1
lowerCamelCase__ : str = []
lowerCamelCase__ : Optional[Any] = []
else:
lowerCamelCase__ : Optional[int] = line.split(''' ''' )
words.append(splits[0] )
if len(__UpperCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' ,'''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=__UpperCAmelCase ,labels=__UpperCAmelCase ) )
return examples
def lowercase_ ( self :Optional[Any] ,__UpperCAmelCase :TextIO ,__UpperCAmelCase :TextIO ,__UpperCAmelCase :List ) -> int:
"""simple docstring"""
lowerCamelCase__ : str = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(__UpperCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCamelCase__ : str = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(__UpperCAmelCase )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' ,line.split()[0] )
def lowercase_ ( self :Any ,__UpperCAmelCase :str ) -> List[str]:
"""simple docstring"""
if path:
with open(__UpperCAmelCase ,'''r''' ) as f:
lowerCamelCase__ : str = f.read().splitlines()
if "O" not in labels:
lowerCamelCase__ : Union[str, Any] = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        """simple docstring"""
        super().__init__(label_idx=-2)
def lowercase_ ( self :Optional[int] ,__UpperCAmelCase :str ) -> List[str]:
"""simple docstring"""
if path:
with open(__UpperCAmelCase ,'''r''' ) as f:
lowerCamelCase__ : str = f.read().splitlines()
if "O" not in labels:
lowerCamelCase__ : Any = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS(TokenClassificationTask):
def lowercase_ ( self :Dict ,__UpperCAmelCase :int ,__UpperCAmelCase :Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCamelCase__ : Optional[Any] = mode.value
lowerCamelCase__ : Union[str, Any] = os.path.join(__UpperCAmelCase ,F"""{mode}.txt""" )
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : int = []
with open(__UpperCAmelCase ,encoding='''utf-8''' ) as f:
for sentence in parse_incr(__UpperCAmelCase ):
lowerCamelCase__ : str = []
lowerCamelCase__ : Tuple = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(__UpperCAmelCase ) == len(__UpperCAmelCase )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" ,words=__UpperCAmelCase ,labels=__UpperCAmelCase ) )
guid_index += 1
return examples
def lowercase_ ( self :str ,__UpperCAmelCase :TextIO ,__UpperCAmelCase :TextIO ,__UpperCAmelCase :List ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Tuple = 0
for sentence in parse_incr(__UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = preds_list[example_id]
lowerCamelCase__ : Optional[Any] = ''''''
for token in sentence:
out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__UpperCAmelCase )
example_id += 1
def lowercase_ ( self :Dict ,__UpperCAmelCase :str ) -> List[str]:
"""simple docstring"""
if path:
with open(__UpperCAmelCase ,'''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    # Normalise the accepted input shapes to a list of videos (each a list of frames).
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
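# e.g. make_batched(image) -> [[image]]; make_batched([f1, f2]) -> [[f1, f2]];
# an already batched [[f1, f2], [g1, g2]] is returned unchanged.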
class SCREAMING_SNAKE_CASE( A__ ):
"""simple docstring"""
lowerCamelCase__ = ['''pixel_values''']
def __init__( self : str , __snake_case : int = True , __snake_case : Optional[Any] = None , __snake_case : List[str] = PILImageResampling.BILINEAR , __snake_case : int = True , __snake_case : Optional[int] = None , __snake_case : Tuple = True , __snake_case : List[str] = 1 / 255 , __snake_case : Any = True , __snake_case : Optional[int] = None , __snake_case : Tuple = None , **__snake_case : int , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Dict = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='''crop_size''' )
UpperCAmelCase : Dict = do_resize
UpperCAmelCase : Any = size
UpperCAmelCase : Dict = do_center_crop
UpperCAmelCase : List[str] = crop_size
UpperCAmelCase : Union[str, Any] = resample
UpperCAmelCase : List[Any] = do_rescale
UpperCAmelCase : List[str] = rescale_factor
UpperCAmelCase : Tuple = do_normalize
UpperCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] = PILImageResampling.BILINEAR , __snake_case : int = None , **__snake_case : Any , ) -> np.ndarray:
UpperCAmelCase : str = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" in size:
UpperCAmelCase : Optional[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE__ )
elif "height" in size and "width" in size:
UpperCAmelCase : Any = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}""" )
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def A ( self : Dict , __snake_case : int , __snake_case : Optional[Any] , __snake_case : Optional[Any] = None , **__snake_case : str , ) -> np.ndarray:
UpperCAmelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have \'height\' and \'width\' as keys. Got {size.keys()}""" )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def A ( self : Optional[Any] , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Tuple = None , **__snake_case : List[Any] , ) -> Optional[Any]:
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def A ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple , __snake_case : Dict , __snake_case : Any = None , **__snake_case : Optional[Any] , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def A ( self : str , __snake_case : str , __snake_case : int = None , __snake_case : Optional[Any] = None , __snake_case : str = None , __snake_case : Dict = None , __snake_case : int = None , __snake_case : List[Any] = None , __snake_case : Dict = None , __snake_case : Union[str, Any] = None , __snake_case : Optional[Any] = None , __snake_case : Tuple = None , __snake_case : Optional[Any] = ChannelDimension.FIRST , ) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase : Optional[int] = to_numpy_array(SCREAMING_SNAKE_CASE__ )
if do_resize:
UpperCAmelCase : List[Any] = self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ )
if do_center_crop:
UpperCAmelCase : Optional[Any] = self.center_crop(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ )
if do_rescale:
UpperCAmelCase : Optional[Any] = self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ )
if do_normalize:
UpperCAmelCase : List[Any] = self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Any = to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return image
def A ( self : List[str] , __snake_case : Any , __snake_case : List[str] = None , __snake_case : List[Any] = None , __snake_case : Union[str, Any] = None , __snake_case : Any = None , __snake_case : List[str] = None , __snake_case : str = None , __snake_case : Any = None , __snake_case : List[str] = None , __snake_case : Any = None , __snake_case : int = None , __snake_case : Union[str, Any] = None , __snake_case : Tuple = ChannelDimension.FIRST , **__snake_case : List[Any] , ) -> PIL.Image.Image:
UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase : Union[str, Any] = resample if resample is not None else self.resample
UpperCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase : List[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase : Tuple = size if size is not None else self.size
UpperCAmelCase : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , param_name='''crop_size''' )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase : Dict = make_batched(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase : int = [
[
self._preprocess_image(
image=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , do_center_crop=SCREAMING_SNAKE_CASE__ , crop_size=SCREAMING_SNAKE_CASE__ , do_rescale=SCREAMING_SNAKE_CASE__ , rescale_factor=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , image_mean=SCREAMING_SNAKE_CASE__ , image_std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , )
for img in video
]
for video in videos
]
UpperCAmelCase : List[str] = {'''pixel_values''': videos}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command as a subcommand of the given parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
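    # _LazyModule defers the submodule imports until an attribute is first accessed,
    # so importing this package stays cheap when the optional tokenizers backend
    # (HerbertTokenizerFast) is not installed.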
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=12, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
__lowercase = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into a per-session temp dir
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting download metrics
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # silence SQLAlchemy 2.0 deprecation warnings during the tests
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ):
"""simple docstring"""
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor()
self.sp_model.Load(__UpperCAmelCase )
@property
def __magic_name__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = d
__lowercase = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
"""simple docstring"""
__lowercase = self.sp_model.EncodeAsPieces(__UpperCAmelCase )
return pieces
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
return self.sp_model.PieceToId(__UpperCAmelCase )
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
return self.sp_model.IdToPiece(__UpperCAmelCase )
def __magic_name__ ( self , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = self.sp_model.decode_pieces(__UpperCAmelCase )
return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
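    # e.g. a single sequence is encoded as [CLS] A [SEP], a pair as [CLS] A [SEP] B [SEP]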
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__UpperCAmelCase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__UpperCAmelCase ) )
return
__lowercase = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
'''simple docstring'''
class matrix:  # Public class to implement a graph
    """simple docstring"""

    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]):  # DFS over the 8 neighbours
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
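# Usage sketch (1 = land, 0 = water; diagonals connect, so this grid is one island):
#   grid = [[1, 0, 0],
#           [0, 1, 0],
#           [0, 0, 1]]
#   matrix(3, 3, grid).count_islands()  # -> 1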
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'Whether tp freeze the encoder.'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class UpperCamelCase__ :
"""simple docstring"""
UpperCAmelCase__ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
UpperCAmelCase__ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
UpperCAmelCase__ = field(
default=10_24 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase__ = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
UpperCAmelCase__ = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
UpperCAmelCase__ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
UpperCAmelCase__ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
UpperCAmelCase__ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'Source language id for translation.'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': 'Target language id for translation.'} )
UpperCAmelCase__ = field(default=lowerCamelCase__ , metadata={'help': '# num_beams to use for evaluation.'} )
UpperCAmelCase__ = field(
default=lowerCamelCase__ , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowercase , _lowercase , _lowercase = parser.parse_args_into_dataclasses()
check_output_dir(A_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , A_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowercase = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(A_ , A_ , A_ ):
assert hasattr(A_ , A_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(A_ , A_ , getattr(A_ , A_ ) )
_lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=A_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(A_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(A_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(A_ , A_ ):
_lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(A_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_lowercase = SeqaSeqDataset
# Get datasets
_lowercase = (
dataset_class(
A_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
_lowercase = (
dataset_class(
A_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_lowercase = (
dataset_class(
A_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_lowercase = (
build_compute_metrics_fn(data_args.task , A_ ) if training_args.predict_with_generate else None
)
_lowercase = SeqaSeqTrainer(
model=A_ , args=A_ , data_args=A_ , train_dataset=A_ , eval_dataset=A_ , data_collator=SeqaSeqDataCollator(
A_ , A_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=A_ , tokenizer=A_ , )
_lowercase = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
_lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_lowercase = train_result.metrics
_lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , A_ , training_args.output_dir )
all_metrics.update(A_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_lowercase = trainer.evaluate(metric_key_prefix="val" )
_lowercase = data_args.n_val
_lowercase = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , A_ , training_args.output_dir )
all_metrics.update(A_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
_lowercase = trainer.predict(test_dataset=A_ , metric_key_prefix="test" )
_lowercase = test_output.metrics
_lowercase = data_args.n_test
if trainer.is_world_process_zero():
_lowercase = round(metrics["test_loss"] , 4 )
handle_metrics("test" , A_ , training_args.output_dir )
all_metrics.update(A_ )
if training_args.predict_with_generate:
_lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ )
_lowercase = lmap(str.strip , A_ )
write_txt_file(A_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(A_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
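# e.g. the node at position 3 has parent (3 - 1) // 2 = 1 and children 2 * 3 + 1 = 7 and 2 * 3 + 2 = 8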
class MinPriorityQueue(Generic[T]):
    """Min-heap priority queue keyed by weight, with a position map for fast key updates."""

    def __init__(self):
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self):
        return self.elements

    def __repr__(self):
        return str(self.heap)

    def is_empty(self):
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem, weight):
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem, weight):
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem):
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem):
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos, node2_pos):
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency dict of dicts."""

    def __init__(self):
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self):
        return str(self.connections)

    def __len__(self):
        return self.nodes

    def add_node(self, node):
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1, node2, weight):
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def _lowerCAmelCase ( UpperCamelCase_ , ):
__SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections}
__SCREAMING_SNAKE_CASE = {node: None for node in graph.connections}
__SCREAMING_SNAKE_CASE = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(UpperCamelCase_ , UpperCamelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
__SCREAMING_SNAKE_CASE = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCamelCase_ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
# running prim's algorithm
while not priority_queue.is_empty():
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCamelCase_ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
return dist, parent
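
# --- Illustrative usage sketch (added; assumes the MinPriorityQueue class defined
# --- earlier in this file). With every key initially at `maxsize`, the first node
# --- extracted becomes the root of the spanning tree.
if __name__ == "__main__":
    demo_graph = GraphUndirectedWeighted()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("a", "c", 15)
    demo_graph.add_edge("b", "d", 100)
    dist, parent = prims_algo(demo_graph)
    print(parent)  # expected tree links: {'a': None, 'b': 'a', 'c': 'b', 'd': 'b'}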
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
print("""Googling.....""")
_SCREAMING_SNAKE_CASE = F"""https://www.google.com/search?q={query}&num=100"""
_SCREAMING_SNAKE_CASE = requests.get(
url,
headers={"""User-Agent""": str(UserAgent().random)},
)
try:
_SCREAMING_SNAKE_CASE = (
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """yuRUbf"""})
.find("""a""")
.get("""href""")
)
except AttributeError:
_SCREAMING_SNAKE_CASE = parse_qs(
BeautifulSoup(res.text, """html.parser""")
.find("""div""", attrs={"""class""": """kCrYT"""})
.find("""a""")
.get("""href""")
)["""url"""][0]
webbrowser.open(link)
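
# Invocation sketch (illustrative; script name assumed):
#   python crawl_google_results.py hugging face
# googles the joined argv terms and opens the top organic result; with no
# arguments it prompts for a query on stdin instead.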
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
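
# Run sketch (illustrative; assumes a transformers source checkout): the @slow
# integration tests download facebook/xglm-564M and only run with RUN_SLOW set, e.g.
#   RUN_SLOW=1 pytest tests/models/xglm/test_modeling_tf_xglm.py -k "sample or batch"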
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if n reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1000000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
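
# Quick check (illustrative): solution(10) == 1 + 3 + 5 + 7 + 9 == 25, since every
# single digit is a base-10 palindrome and 1 (1), 3 (11), 5 (101), 7 (111), 9 (1001)
# are the ones whose binary form is also palindromic.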
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
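
# Worked example (illustrative): gold "six feet", prediction "six feet under"
# -> gold tokens [six, feet], pred tokens [six, feet, under], overlap = 2,
# precision = 2/3, recall = 2/2, F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8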
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
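
# Invocation sketch (file names hypothetical):
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --na-prob-thresh 0.5 -o eval.json
# Without --na-prob-file every question is scored as answered (na_prob = 0.0),
# so the no-answer threshold machinery becomes a no-op.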
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) row by row with Pascal's rule, using O(r) extra space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
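
# Quick check (illustrative): binomial_coefficient(n=10, r=5) returns 252,
# matching math.comb(10, 5).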
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
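
# Usage sketch (illustrative): the three encoder depths are folded into a single
# dict so downstream LXMERT code can look up the layer count per modality.
if __name__ == "__main__":
    cfg = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
    print(cfg.num_hidden_layers)  # {'vision': 5, 'cross_encoder': 5, 'language': 9}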
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List

from ..utils import logging


logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )

    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )

    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )

    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
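
# Usage sketch (illustrative; the class is deprecated upstream): parse CLI flags
# into the dataclass with transformers' HfArgumentParser, e.g.
#   from transformers import HfArgumentParser
#   args = HfArgumentParser(BenchmarkArguments).parse_args_into_dataclasses()[0]
#   print(args.model_names, args.batch_sizes)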
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


def extract_label(fname):
    # File names look like `{label}_{index}.jpg`; recover the label part.
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
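
# Launch sketch (illustrative; expects a folder of `{label}_{index}.jpg` files such
# as the Oxford-IIIT Pet images, and a prior `accelerate config` run):
#   accelerate launch cv_example.py --data_dir images/ --checkpointing_steps epoch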
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
"""simple docstring"""
import math
def _a ( _snake_case ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _a ( _snake_case = 0.1 ):
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(_snake_case )
j += 2
return j
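
# Worked example (illustrative): with ratio=0.5 the loop scans the corner values
# 13,17,21 | 31,37,43 | 57,65,73 | 91,101,111; after the last batch only 10 of the
# 21 diagonal values are prime (10/21 < 0.5), so solution(0.5) returns 11.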
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE = '''CLIPImageProcessor'''
SCREAMING_SNAKE_CASE = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self ,A=None ,A=None ,**A ):
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,A ,)
UpperCAmelCase = kwargs.pop("""feature_extractor""" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(A ,A )
def __call__( self ,A=None ,A=None ,A=None ,**A ):
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCAmelCase = self.tokenizer(A ,return_tensors=A ,**A )
if images is not None:
UpperCAmelCase = self.image_processor(A ,return_tensors=A ,**A )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) ,tensor_type=A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.batch_decode(*A ,**A )
def _UpperCamelCase ( self ,*A ,**A ):
return self.tokenizer.decode(*A ,**A )
@property
def _UpperCamelCase ( self ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,A ,)
return self.image_processor_class
@property
def _UpperCamelCase ( self ):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,A ,)
return self.image_processor
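
# Usage sketch (illustrative; downloads weights, checkpoint name assumed):
#   from transformers import CLIPProcessor
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> dict with input_ids, attention_mask and pixel_values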
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
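
# Usage sketch (illustrative): the defaults above describe the 1024-wide, 24-layer
# encoder used by checkpoints such as google/bert_for_seq_generation_L-24_bbc_encoder.
if __name__ == "__main__":
    cfg = BertGenerationConfig()
    print(cfg.hidden_size, cfg.num_hidden_layers)  # 1024 24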
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
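
# The probe/raise/except pattern above repeats for every optional backend below:
# if e.g. `is_torch_available()` is False, the real objects are replaced by dummy
# placeholders from `.utils`, so `import diffusers` still succeeds without extras.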
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
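# Usage note (a sketch, not part of this file): with the guards above, the
# package imports cleanly even when optional backends are missing. For example,
# if PyTorch were absent, `from diffusers import UNet2DConditionModel` would
# still succeed, but it would bind a dummy object from `utils.dummy_pt_objects`
# that raises an informative error on instantiation via `requires_backends`.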
from collections import defaultdict


class AssignmentUsingBitmask:
    """Count the ways to assign N distinct tasks to M persons using a bitmask DP."""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M) * N;
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask, all persons have been assigned a task: return 1
        if mask == self.final_mask:
            return 1

        # if not everyone got a task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if this case was already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the task to every possible person and recursively
        # assign the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value, then recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table; the final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
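# Sanity check (worked by hand, not an executed result): person 0 picks from
# {1, 3, 4}, person 1 from {1, 2, 5}, person 2 from {3, 4}, all tasks distinct.
# Enumerating the combinations gives 10 valid assignments, so the script should
# print 10.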
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
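# Usage note: with the lazy module above, importing the package stays cheap;
# the `tokenization_byt5` submodule is only loaded the first time an attribute
# such as `ByT5Tokenizer` is actually looked up on the module.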
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Turn a trailing list of '--key value' pairs into a dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
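# Example invocations (illustrative; available flags depend on the installed
# `datasets` version):
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs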
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        # Return the item located at `_loader_batch_index` within the current
        # `_loader_batch_data`, reshaped to look like batch_size=1.
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # for compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within that batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # `subiterator` being None means we haven't started a `preprocess` iterator, so start one
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism,
        # BUT we have an extra required item, which is the presence of `is_last`.
        # That is because everything is flattened by `PipelineChunkIterator`; we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    """Configuration class for the GPTSAN-japanese model."""

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
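# Minimal usage sketch (defaults as defined above):
#   config = GPTSanJapaneseConfig(d_model=1024, num_switch_layers=10)
#   config.hidden_size  # -> 1024, resolved through `attribute_map`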
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
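# Usage note: concrete readers in `datasets` (CSV, JSON, Parquet, ...) subclass
# the classes above; a subclass only has to store its arguments in `__init__`
# and return a `Dataset` (or streaming variant) from `read()`.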
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """
    Resolve a force given in polar form (magnitude, angle) into its rectangular
    components [force_x, force_y].
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """
    Check whether a system of forces is in static equilibrium: the net moment of
    all forces about the given locations must (approximately) vanish.
    """
    # moment of each force about the origin: r x F
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Union[str, Any] = PandasConfig
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
SCREAMING_SNAKE_CASE : Optional[int] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_SCREAMING_SNAKE_CASE , (str, list, tuple) ):
SCREAMING_SNAKE_CASE : Tuple = data_files
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE : Any = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
SCREAMING_SNAKE_CASE : Optional[int] = []
for split_name, files in data_files.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE : List[str] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
SCREAMING_SNAKE_CASE : Optional[Any] = [dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=_SCREAMING_SNAKE_CASE , gen_kwargs={'files': files} ) )
return splits
def _lowerCAmelCase ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE : Tuple = table_cast(_SCREAMING_SNAKE_CASE , self.config.features.arrow_schema )
return pa_table
def _lowerCAmelCase ( self : List[Any] , _SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(_SCREAMING_SNAKE_CASE ) ):
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
SCREAMING_SNAKE_CASE : Dict = pa.Table.from_pandas(pd.read_pickle(_SCREAMING_SNAKE_CASE ) )
yield i, self._cast_table(_SCREAMING_SNAKE_CASE )
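# Usage sketch (the file name is a placeholder): this builder backs the packaged
# "pandas" loader, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("pandas", data_files="data.pkl")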
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( __A : Union[str, Any] , __A : Any , __A : Dict ) -> Tuple:
'''simple docstring'''
# Initialise PyTorch model
SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
SCREAMING_SNAKE_CASE : List[Any] = BertForPreTraining(__A )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__A , __A , __A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __A )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A_ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
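# Example invocation (script name and paths are placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin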
"""simple docstring"""
import math
import qiskit
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 1, SCREAMING_SNAKE_CASE_ = 1 ):
if (
isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
or isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
or isinstance(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(SCREAMING_SNAKE_CASE_ ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE_ ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE_ ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
SCREAMING_SNAKE_CASE = qiskit.QuantumRegister(4, 'qr' )
SCREAMING_SNAKE_CASE = qiskit.ClassicalRegister(2, 'cr' )
# list the entries
SCREAMING_SNAKE_CASE = [input_a, input_a, carry_in]
SCREAMING_SNAKE_CASE = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for i in range(0, 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0, 1, 3 ) # ccx = toffoli gate
quantum_circuit.cx(0, 1 )
quantum_circuit.ccx(1, 2, 3 )
quantum_circuit.cx(1, 2 )
quantum_circuit.cx(0, 1 )
quantum_circuit.measure([2, 3], SCREAMING_SNAKE_CASE_ ) # measure the last two qbits
SCREAMING_SNAKE_CASE = qiskit.Aer.get_backend('aer_simulator' )
SCREAMING_SNAKE_CASE = qiskit.execute(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, shots=1_0_0_0 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
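# Sanity check (reasoning, not an executed result): with deterministic inputs
# (1, 1, 1) the adder computes 1 + 1 + 1 = 3 = 0b11, so the returned counts
# should concentrate on the bitstring '11' (carry=1, sum=1) across all shots.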
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the checkpoint's weights to our RoBERTa-PreLayerNorm structure.
    """
    # convert configuration
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
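# Example invocation (script name and output path are placeholders; the repo id
# comes from the help text above):
#   python convert_roberta_prelayernorm_checkpoint.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta_prelayernorm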
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """
    Calculate the waiting time of each process using the shortest-remaining-time-first
    (preemptive SJF) policy.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    completed = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while completed != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            completed += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            turnaround = finish_time - arrival_time[short]
            waiting_time[short] = turnaround - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time for each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
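# Non-interactive sanity check (worked by hand, not an executed result): for
# arrival times [0, 1, 2] and burst times [4, 2, 1], SRTF preempts process 0
# at t=1 in favour of process 1, finishes process 2 next, and resumes process 0
# last, giving waiting times [3, 0, 1]; calculate_waitingtime([0, 1, 2],
# [4, 2, 1], 3) should reproduce this.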
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import unittest

from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : Dict = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
    expected_encoding=UpperCAmelCase, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
)
| 609
| 1
|
'''Project Euler 800: a "hybrid-integer" is p**q * q**p for distinct primes p and q.
Count the hybrid-integers below base**degree by comparing logarithms instead of the
astronomically large integers themselves.'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count pairs of distinct primes p < q with
    q * log2(p) + p * log2(q) <= degree * log2(base), using a two-pointer
    sweep over the sieved primes."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
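    # Added cross-check, not part of the original solution: a naive O(n^2) pair
    # scan over the same sieve should agree with the two-pointer sweep
    # (`brute_force` is a name introduced here for illustration).
    def brute_force(base: int, degree: int) -> int:
        upper_bound = degree * log2(base)
        primes = calculate_prime_numbers(int(upper_bound))
        count = 0
        for i, p in enumerate(primes):
            for q in primes[i + 1 :]:
                if q * log2(p) + p * log2(q) <= upper_bound:
                    count += 1
        return count

    assert brute_force(800, 800) == solution(800, 800)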
| 185
|
'''RWKV model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
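# A minimal usage sketch, added for illustration (values are arbitrary assumptions):
#
#   config = RwkvConfig(context_length=2048, hidden_size=768, num_hidden_layers=12)
#   config.max_position_embeddings  # resolves to config.context_length via attribute_map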
| 185
| 1
|
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method, holding the decoded sample."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE: maps each latent vector to its
    nearest codebook entry and returns a commitment loss."""

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
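# Minimal usage sketches, added for illustration (shapes are arbitrary; the module
# itself only runs inside the diffusers package because of the relative imports above):
#
#   vq = VectorQuantizer(n_e=512, vq_embed_dim=64, beta=0.25)
#   z_q, commit_loss, (_, _, indices) = vq(torch.randn(2, 64, 8, 8))
#
#   params = torch.randn(2, 8, 16, 16)          # [mean, logvar] concatenated on dim=1
#   dist = DiagonalGaussianDistribution(params)
#   latent = dist.sample()                      # (2, 4, 16, 16)
#   kl = dist.kl()                              # per-sample KL against a standard normal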
| 548
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 548
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Any = "funnel"
a__ : List[str] = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : Optional[int] , _lowercase : List[Any]=3_05_22 , _lowercase : Dict=[4, 4, 4] , _lowercase : List[str]=None , _lowercase : str=2 , _lowercase : Optional[Any]=7_68 , _lowercase : List[Any]=12 , _lowercase : Optional[int]=64 , _lowercase : Optional[Any]=30_72 , _lowercase : int="gelu_new" , _lowercase : Optional[int]=0.1 , _lowercase : Tuple=0.1 , _lowercase : List[str]=0.0 , _lowercase : Any=0.1 , _lowercase : List[Any]=None , _lowercase : int=1E-9 , _lowercase : List[Any]="mean" , _lowercase : int="relative_shift" , _lowercase : Optional[int]=True , _lowercase : Optional[int]=True , _lowercase : Optional[Any]=True , **_lowercase : Optional[int] , ):
__UpperCAmelCase = vocab_size
__UpperCAmelCase = block_sizes
__UpperCAmelCase = [1] * len(_lowercase ) if block_repeats is None else block_repeats
assert len(_lowercase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__UpperCAmelCase = num_decoder_layers
__UpperCAmelCase = d_model
__UpperCAmelCase = n_head
__UpperCAmelCase = d_head
__UpperCAmelCase = d_inner
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = initializer_range
__UpperCAmelCase = initializer_std
__UpperCAmelCase = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__UpperCAmelCase = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__UpperCAmelCase = attention_type
__UpperCAmelCase = separate_cls
__UpperCAmelCase = truncate_seq
__UpperCAmelCase = pool_q_only
super().__init__(**_lowercase )
@property
def a ( self : List[str] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def a ( self : List[Any] , _lowercase : Optional[Any] ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def a ( self : Union[str, Any] ):
return len(self.block_sizes )
@num_blocks.setter
def a ( self : str , _lowercase : int ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
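# A minimal usage sketch, added for illustration of the derived properties:
#
#   config = FunnelConfig(block_sizes=[2, 2], block_repeats=[1, 2])
#   config.num_blocks         # -> 2, always len(block_sizes)
#   config.num_hidden_layers  # -> 4, always sum(block_sizes); change block_sizes to change it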
| 49
|
"""Project Euler 21: sum of all amicable numbers below a limit."""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10_000) -> int:
    """Sum every amicable number below n, i.e. every i whose proper-divisor sum
    maps to a different number whose proper-divisor sum maps back to i."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
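# Added sanity check (not in the original): 220 and 284 form the classic
# amicable pair, so sum_of_divisors maps each onto the other.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220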
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 563
| 0
|
# Reports modified .py files under the top-level sub-dirs passed as arguments;
# the output feeds Makefile commands, hence no trailing newline.
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 700
|
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Check whether a binary tree is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
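    # Added usage sketch: a small valid BST and one that violates the ordering.
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)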
| 576
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    """Map an original EfficientFormer checkpoint key to its transformers name."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of the original state dict."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint


def prepare_img():
    """Fetch the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 488
|
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 1
|
"""Project Euler 173: count the square laminae (square outlines with a centred
square hole) that can be formed with up to `limit` tiles."""
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole and the outer square must share parity to stay centred
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
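def brute_force(limit: int) -> int:
    """Added brute-force cross-check (not part of the original solution): count
    laminae directly as outer**2 - inner**2 <= limit over same-parity widths.
    Expected to agree with solution(), e.g. brute_force(10_000) == solution(10_000)."""
    count = 0
    outer = 3
    while 4 * (outer - 1) <= limit:  # the thinnest lamina of this width still fits
        inner = outer - 2
        while inner >= 1 and outer**2 - inner**2 <= limit:
            count += 1
            inner -= 2
        outer += 1
    return count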
if __name__ == "__main__":
    print(f"{solution() = }")
| 714
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after shortest-edge resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 626
| 0
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 575
|
"""simple docstring"""
from math import factorial, pi
def snake_case__ ( _lowerCamelCase, _lowerCamelCase = 30 ) ->float:
"""simple docstring"""
if not isinstance(_lowerCamelCase, (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(_lowerCamelCase, _lowerCamelCase ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
__lowercase : List[str] = float(_lowerCamelCase )
__lowercase : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(_lowerCamelCase ) )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase = 30 ) ->float:
"""simple docstring"""
if not isinstance(_lowerCamelCase, (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(_lowerCamelCase, _lowerCamelCase ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
__lowercase : List[str] = float(_lowerCamelCase )
__lowercase : str = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
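    # Added sanity check against the math module (not in the original):
    from math import cos, sin

    assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
    assert abs(maclaurin_cos(5) - cos(5)) < 1e-9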
| 575
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True

        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
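# --- Added illustration (not part of the original script): the slicing above splits a
# fused (3 * hidden, hidden) qkv matrix into three equal row blocks. A minimal shape
# check with a toy hidden size; names here are local to the demo.
def _demo_qkv_split(hidden_size=4):
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)
    # the three blocks partition the fused matrix exactly, in q/k/v order
    assert torch.equal(torch.cat([q, k, v], dim=0), in_proj_weight)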
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True)
        image_processor.push_to_hub(repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True)
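# --- Added usage sketch (not part of the original script): after conversion, the saved
# folder can be reloaded like any other checkpoint. The folder path below is a
# placeholder for whatever --pytorch_dump_folder_path was used.
def _demo_reload(pytorch_dump_folder_path="./dpt-large-converted"):
    model = DPTForDepthEstimation.from_pretrained(pytorch_dump_folder_path)
    processor = DPTImageProcessor.from_pretrained(pytorch_dump_folder_path)
    inputs = processor(prepare_img(), return_tensors="pt")
    with torch.no_grad():
        depth = model(**inputs).predicted_depth
    print(depth.shape)  # e.g. (1, 384, 384) for the dpt-large depth model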
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
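# --- Added usage sketch (not part of the original module): feeding a dummy image
# through the processor defined above. Requires Pillow; the sizes follow the class
# defaults (resize to 256x256, center-crop to 224x224).
if __name__ == "__main__":
    dummy = PIL.Image.new("RGB", (300, 500))
    processor = DeiTImageProcessor()
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)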
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)
        # indices beyond the sentencepiece vocab have no piece; fall back to the unk token
        return self.unk_token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
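# --- Added usage sketch (not part of the original module): encode/decode with the
# tokenizer above. Downloading "google/reformer-crime-and-punishment" requires network
# access and the sentencepiece package; any spiece.model file would work as well.
if __name__ == "__main__":
    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tokenizer("The quick brown fox.").input_ids
    print(tokenizer.decode(ids))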
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
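# --- Added usage sketch (not part of the original conftest): a test consuming the
# directory fixture above; the fixture names follow the definitions in this file.
def test_dummy_dataset_script_is_materialized(dataset_loading_script_dir):
    import os

    assert os.path.isfile(os.path.join(dataset_loading_script_dir, "__dummy_dataset1__.py"))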
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
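# --- Added illustration (not part of the original tests): the shortest-edge rule above,
# worked on one concrete shape. For a 400x300 (h x w) image with shortest_edge=18, the
# width becomes 18 and the height scales to int(18 * 400 / 300) = 24.
def _demo_shortest_edge(h=400, w=300, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge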
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
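# --- Added illustration (not part of the original tests): the bf16-vs-reference
# comparison pattern used above, on synthetic data. atol=1e-2 comfortably absorbs the
# rounding gap between bfloat16 activations and float32 references for values near 1.
def _demo_bf16_tolerance():
    x = jnp.linspace(-1.0, 1.0, 8, dtype=jnp.float32)
    x_bf16 = x.astype(jnp.bfloat16).astype(jnp.float32)
    assert jnp.allclose(x, x_bf16, atol=1e-2)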
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
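# --- Added illustration (not part of the original script): a pure-numpy triangular
# membership function equivalent to fuzz.membership.trimf for a strictly increasing
# triple [a, b, c]; e.g. np.allclose(_trimf(X, [0, 25, 50]),
# fuzz.membership.trimf(X, [0, 25, 50])) should hold.
def _trimf(x, abc):
    a, b, c = abc
    y = np.zeros_like(x, dtype=float)
    rising = (a < x) & (x <= b)
    falling = (b < x) & (x < c)
    y[rising] = (x[rising] - a) / (b - a)   # membership grows from a to the peak b
    y[falling] = (c - x[falling]) / (c - b)  # and decays from b down to c
    return y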
# fmt: off
MORSE_CODE_DICT = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
    'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
    'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
    'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
    '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
    '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
    ':': '---...', ',': '--..--', '.': '.-.-.-', "'": '.----.', '"': '.-..-.',
    '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
    '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
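# --- Added illustration (not part of the original script): encrypt/decrypt are inverse
# for messages whose characters all appear in MORSE_CODE_DICT (upper-case letters,
# digits, the listed punctuation and spaces).
def _demo_round_trip():
    message = "SOS 911"
    assert decrypt(encrypt(message)) == message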
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            dummy_text = "Hello"
            encoded_images = image_processor(image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (the RGBA input is converted to RGB, hence num_channels - 1)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))
            # Test batched
            encoded_images = image_processor(image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim))
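# --- Added illustration (not part of the original tests): where expected_hidden_dim
# comes from. Each flattened Pix2Struct patch stores its row and column index (the +2)
# followed by the raw patch pixels.
def _demo_expected_hidden_dim(patch_height=16, patch_width=16, num_channels=3):
    return patch_height * patch_width * num_channels + 2  # 16 * 16 * 3 + 2 = 770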
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCAmelCase : Any =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
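
# The `_import_structure` above defers importing the torch/TF submodules until an
# attribute is first accessed. A minimal, self-contained sketch of the same idea
# (an illustration, not transformers' actual `_LazyModule` implementation):
#
#     import importlib
#
#     class LazyModule:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._attr_to_module[attr], self._name)
#             return getattr(module, attr)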
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
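
# Usage sketch for the pipeline tested above, with the same public checkpoint as
# the slow test; the image path is a placeholder you would replace.
if __name__ == "__main__":
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    preds = vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )
    print(preds)  # e.g. [{"score": 0.88, "answer": "2"}, {"score": 0.30, "answer": "1"}]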
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCAmelCase = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCamelCase :
_SCREAMING_SNAKE_CASE : Dict = PegasusConfig
_SCREAMING_SNAKE_CASE : Any = {}
_SCREAMING_SNAKE_CASE : List[str] = """gelu"""
def __init__( self :Union[str, Any] , __magic_name__ :Dict , __magic_name__ :int=13 , __magic_name__ :int=7 , __magic_name__ :Union[str, Any]=True , __magic_name__ :Optional[int]=False , __magic_name__ :Optional[Any]=99 , __magic_name__ :str=32 , __magic_name__ :List[str]=5 , __magic_name__ :Tuple=4 , __magic_name__ :Dict=37 , __magic_name__ :Optional[int]=0.1 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Tuple=20 , __magic_name__ :Tuple=2 , __magic_name__ :Union[str, Any]=1 , __magic_name__ :Optional[Any]=0 , ) ->Union[str, Any]:
lowercase : str = parent
lowercase : Tuple = batch_size
lowercase : Tuple = seq_length
lowercase : int = is_training
lowercase : List[Any] = use_labels
lowercase : Optional[int] = vocab_size
lowercase : Optional[Any] = hidden_size
lowercase : Optional[int] = num_hidden_layers
lowercase : List[Any] = num_attention_heads
lowercase : Optional[int] = intermediate_size
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : Any = attention_probs_dropout_prob
lowercase : List[str] = max_position_embeddings
lowercase : Tuple = eos_token_id
lowercase : Union[str, Any] = pad_token_id
lowercase : Optional[Any] = bos_token_id
def __snake_case ( self :Any ) ->Dict:
lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
lowercase : Dict = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
lowercase : Any = np.concatenate([input_ids, eos_tensor] , axis=1 )
lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase : Optional[int] = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def __snake_case ( self :Optional[int] , __magic_name__ :Optional[int] , __magic_name__ :List[Any] , __magic_name__ :Optional[Any] ) ->List[Any]:
lowercase : Optional[Any] = 20
lowercase : int = model_class_name(__UpperCamelCase )
lowercase : Optional[int] = model.encode(inputs_dict["""input_ids"""] )
lowercase , lowercase : int = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase : int = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
lowercase : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowercase : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase : Dict = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowercase : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowercase : Any = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
lowercase : Optional[Any] = model.decode(__UpperCamelCase , __UpperCamelCase )
lowercase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def __snake_case ( self :Optional[int] , __magic_name__ :List[Any] , __magic_name__ :Tuple , __magic_name__ :str ) ->Any:
lowercase : Any = 20
lowercase : str = model_class_name(__UpperCamelCase )
lowercase : List[Any] = model.encode(inputs_dict["""input_ids"""] )
lowercase , lowercase : Tuple = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
lowercase : str = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
lowercase : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowercase : int = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowercase : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowercase : int = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
lowercase : Optional[Any] = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
lowercase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> Union[str, Any]:
if attention_mask is None:
lowercase : Optional[int] = np.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
lowercase : List[Any] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCamelCase (__snake_case , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_SCREAMING_SNAKE_CASE : List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_SCREAMING_SNAKE_CASE : Optional[int] = True
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : List[Any] = False
_SCREAMING_SNAKE_CASE : List[str] = False
def __snake_case ( self :Any ) ->Union[str, Any]:
lowercase : Tuple = FlaxPegasusModelTester(self )
lowercase : Tuple = ConfigTester(self , config_class=__UpperCamelCase )
def __snake_case ( self :Optional[Any] ) ->int:
self.config_tester.run_common_tests()
def __snake_case ( self :List[Any] ) ->List[Any]:
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __snake_case ( self :List[str] ) ->Any:
lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __snake_case ( self :Any ) ->Optional[int]:
lowercase , lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowercase : Dict = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__magic_name__ :Optional[Any] , __magic_name__ :Union[str, Any]=None , **__magic_name__ :Tuple ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
lowercase : Any = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase : Optional[int] = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __snake_case ( self :Optional[Any] ) ->List[Any]:
lowercase , lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase : int = model_class(__UpperCamelCase )
lowercase : Tuple = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
lowercase : List[Any] = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__magic_name__ :List[Any] , __magic_name__ :int , __magic_name__ :List[Any] ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
lowercase : Optional[int] = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase : Optional[int] = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __snake_case ( self :int ) ->Optional[int]:
for model_class_name in self.all_model_classes:
lowercase : int = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
lowercase : int = np.ones((1, 1) )
lowercase : Dict = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def __snake_case ( self :Optional[Any] ) ->Any:
lowercase : str = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
lowercase : str = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
lowercase : List[Any] = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase : str = [
"""California\'s largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.""",
]
lowercase : Dict = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=512 , padding=__UpperCamelCase )
lowercase : List[str] = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
lowercase : List[Any] = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded
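
# Usage sketch mirroring the slow integration test above: summarization with the
# public google/pegasus-xsum checkpoint (Flax weights).
if __name__ == "__main__":
    model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    text = "PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions."
    batch = tokenizer([text], return_tensors="np", truncation=True, max_length=512, padding=True)
    summary_ids = model.generate(**batch, num_beams=2).sequences
    print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))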
"""simple docstring"""
import logging
from transformers import PretrainedConfig
_lowerCAmelCase = logging.getLogger(__name__)
_lowerCAmelCase = {
'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : Any = """bertabs"""
def __init__( self :str , __magic_name__ :Optional[int]=30_522 , __magic_name__ :int=512 , __magic_name__ :Optional[Any]=6 , __magic_name__ :Optional[Any]=512 , __magic_name__ :Optional[Any]=8 , __magic_name__ :int=512 , __magic_name__ :List[Any]=0.2 , __magic_name__ :Union[str, Any]=6 , __magic_name__ :int=768 , __magic_name__ :List[str]=8 , __magic_name__ :List[Any]=2_048 , __magic_name__ :Optional[Any]=0.2 , **__magic_name__ :Dict , ) ->Optional[int]:
super().__init__(**__magic_name__ )
lowercase : Optional[int] = vocab_size
lowercase : List[Any] = max_pos
lowercase : int = enc_layers
lowercase : str = enc_hidden_size
lowercase : List[str] = enc_heads
lowercase : List[Any] = enc_ff_size
lowercase : List[Any] = enc_dropout
lowercase : Any = dec_layers
lowercase : str = dec_hidden_size
lowercase : List[Any] = dec_heads
lowercase : Union[str, Any] = dec_ff_size
lowercase : Any = dec_dropout
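
# Usage sketch: instantiate the config with its defaults, or override a few fields.
if __name__ == "__main__":
    config = BertAbsConfig(dec_layers=8, dec_dropout=0.1)
    print(config.enc_hidden_size, config.dec_hidden_size)  # 512 768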
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
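
# Usage sketch for the tokenizer under test, using the public checkpoint from
# the integration test above.
if __name__ == "__main__":
    tok = CamembertTokenizer.from_pretrained("camembert-base")
    ids = tok.encode("Le transformeur est un modèle d'apprentissage profond.")
    print(tok.convert_ids_to_tokens(ids))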
def __A(lowerCAmelCase ) -> bool:
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError("""check_bouncy() accepts only integer arguments""" )
_UpperCamelCase = str(lowerCAmelCase )
_UpperCamelCase = """""".join(sorted(lowerCAmelCase ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __A(lowerCAmelCase = 9_9 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_0_0:
raise ValueError("""solution() only accepts values from 0 to 100""" )
_UpperCamelCase = 0
_UpperCamelCase = 1
while True:
if check_bouncy(lowerCAmelCase ):
bouncy_num += 1
if (bouncy_num / num) * 1_0_0 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
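
# Worked example: 538 is bouncy because its digits first fall (5 -> 3) and then
# rise (3 -> 8), so they are neither sorted ascending nor descending. Per the
# Project Euler 112 statement, 538 is also the least number at which the
# proportion of bouncy numbers reaches 50%, and 21780 the least for 90%.
if __name__ == "__main__":
    assert check_bouncy(538)
    assert solution(50) == 538
    assert solution(90) == 21780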
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
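
# Usage sketch: pair the processor with a BLIP-2 checkpoint to build image+text
# inputs. The checkpoint name "Salesforce/blip2-opt-2.7b" is an assumption for
# illustration.
if __name__ == "__main__":
    from PIL import Image
    from transformers import Blip2Processor

    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    image = Image.new("RGB", (224, 224))
    inputs = processor(images=image, text="Question: how many cats are there? Answer:", return_tensors="pt")
    print(list(inputs.keys()))  # e.g. pixel_values, input_ids, attention_mask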
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
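
# Usage sketch: the same pipeline with a real seq2seq checkpoint. "t5-small" is
# a public checkpoint, named here as an assumption for illustration.
if __name__ == "__main__":
    t2t = pipeline("text2text-generation", model="t5-small")
    print(t2t("translate English to German: How old are you?", do_sample=False))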
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None):
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
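
# Usage sketch: preprocess an image and post-process segmentation logits.
# "apple/deeplabv3-mobilevit-small" is a public semantic-segmentation checkpoint,
# named here as an assumption for illustration.
if __name__ == "__main__":
    from PIL import Image
    from transformers import MobileViTForSemanticSegmentation

    image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
    model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
    image = Image.new("RGB", (512, 512))
    inputs = image_processor(images=image, return_tensors="pt")
    outputs = model(**inputs)
    seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])
    print(seg_maps[0].shape)  # one (height, width) class-index map per input image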
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC


torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a library name, which is resolved to its
    installed version) against a requirement using an operation such as ">="."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed PyTorch version against a reference with an operation."""
    return compare_versions(torch_version, operation, version)
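
# Usage sketch (illustrative; assumes the module is imported as part of its package):
#
#     compare_versions("torch", ">=", "1.12")   # compare any installed library's version
#     is_torch_version("<", "2.0")              # shorthand for the current torch install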
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : Optional[Any] = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :Optional[int] = "informer"
_UpperCAmelCase :Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : str , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : str = "student_t" , snake_case__ : str = "nll" , snake_case__ : int = 1 , snake_case__ : List[int] = None , snake_case__ : Optional[Union[str, bool]] = "mean" , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : int = 0 , snake_case__ : Optional[List[int]] = None , snake_case__ : Optional[List[int]] = None , snake_case__ : int = 64 , snake_case__ : int = 32 , snake_case__ : int = 32 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : bool = True , snake_case__ : str = "gelu" , snake_case__ : float = 0.05 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 100 , snake_case__ : float = 0.02 , snake_case__ : Tuple=True , snake_case__ : str = "prob" , snake_case__ : int = 5 , snake_case__ : bool = True , **snake_case__ : Any , ):
# time series specific configuration
lowerCamelCase_ : List[Any] =prediction_length
lowerCamelCase_ : int =context_length or prediction_length
lowerCamelCase_ : Tuple =distribution_output
lowerCamelCase_ : Tuple =loss
lowerCamelCase_ : Tuple =input_size
lowerCamelCase_ : Optional[Any] =num_time_features
lowerCamelCase_ : Dict =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase_ : Optional[Any] =scaling
lowerCamelCase_ : Tuple =num_dynamic_real_features
lowerCamelCase_ : int =num_static_real_features
lowerCamelCase_ : List[str] =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCamelCase_ : Optional[int] =cardinality
else:
lowerCamelCase_ : Any =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCamelCase_ : List[str] =embedding_dimension
else:
lowerCamelCase_ : List[str] =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_ : Optional[int] =num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_ : str =input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase_ : List[str] =d_model
lowerCamelCase_ : Any =encoder_attention_heads
lowerCamelCase_ : Optional[int] =decoder_attention_heads
lowerCamelCase_ : Optional[int] =encoder_ffn_dim
lowerCamelCase_ : Optional[int] =decoder_ffn_dim
lowerCamelCase_ : List[str] =encoder_layers
lowerCamelCase_ : Dict =decoder_layers
lowerCamelCase_ : Dict =dropout
lowerCamelCase_ : Dict =attention_dropout
lowerCamelCase_ : List[Any] =activation_dropout
lowerCamelCase_ : Optional[Any] =encoder_layerdrop
lowerCamelCase_ : Optional[int] =decoder_layerdrop
lowerCamelCase_ : Any =activation_function
lowerCamelCase_ : List[str] =init_std
lowerCamelCase_ : str =use_cache
# Informer
lowerCamelCase_ : List[str] =attention_type
lowerCamelCase_ : Tuple =sampling_factor
lowerCamelCase_ : List[str] =distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
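
# Usage sketch: a small config for monthly data with weekly lags. The values
# are illustrative assumptions, not defaults from any released checkpoint.
if __name__ == "__main__":
    config = InformerConfig(prediction_length=24, context_length=48, num_time_features=1)
    # feature_size = input_size * len(lags_sequence) + static/dynamic/time/scale features
    print(config.feature_size)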
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
A__ : Dict = logging.get_logger(__name__)
class lowercase__ ( snake_case__ ):
def __init__( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
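
# Migration sketch: new code should construct the replacement class directly.
# "Intel/dpt-large" is a public DPT checkpoint, used here as an assumption.
#
#     from transformers import DPTImageProcessor
#
#     image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")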
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
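
# Example invocation of the script above (the script filename and all paths are
# placeholders for illustration):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name facebook/wav2vec2-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_downstream.ckpt \
#         --model_dump_path ./converted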
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with red blocks of
    minimum length 3, separated by at least one black square (Project Euler 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
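
# Worked example from the Project Euler 114 statement: a row measuring seven
# units admits exactly seventeen arrangements.
if __name__ == "__main__":
    assert solution(7) == 17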
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample a chunk of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , _lowerCamelCase , _lowerCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowerCAmelCase : Dict = training_args.get_process_log_level()
logger.setLevel(_lowerCamelCase )
transformers.utils.logging.set_verbosity(_lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_lowerCAmelCase : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_lowerCAmelCase : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
_lowerCAmelCase : Any = DatasetDict()
_lowerCAmelCase : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
_lowerCAmelCase : Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
f"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        '''Randomly subsample each training clip, then run the feature extractor.'''
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio['array'], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch['labels'] = list(batch[data_args.label_column_name])
        return output_batch
    def val_transforms(batch):
        '''Run the feature extractor on full-length validation clips.'''
        wavs = [audio['array'] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch['labels'] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets['train'].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
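    # A minimal sketch of what the loop above produces, with hypothetical labels
    # (not taken from any real dataset): for labels == ['cat', 'dog'] it builds
    # label2id == {'cat': '0', 'dog': '1'} and id2label == {'0': 'cat', '1': 'dog'}.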
# Load the accuracy metric from the datasets package
    metric = evaluate.load('accuracy')
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)
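    # Illustrative values only (assumed shapes, not real model output):
    # eval_pred.predictions has shape (num_examples, num_labels), so logits such as
    # [[0.1, 2.3], [1.7, 0.2]] become class ids [1, 0] after the argmax over axis=1,
    # which are then compared against eval_pred.label_ids.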
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task='audio-classification', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
            raw_datasets['train'] = (
                raw_datasets['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets['train'].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
            raw_datasets['eval'] = (
                raw_datasets['eval'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets['eval'].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets['train'] if training_args.do_train else None, eval_dataset=raw_datasets['eval'] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor)
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Write model card and (optionally) push to hub
    kwargs = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    '''Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint.'''
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)

IMPORT_ERROR_MESSAGE = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch if TensorFlow is
installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''Register the `convert` subcommand and its arguments on the root CLI parser.'''
        train_parser = parser.add_parser(
            'convert', help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.')
        train_parser.add_argument('--model_type', type=str, required=True, help='Model\'s type.')
        train_parser.add_argument(
            '--tf_checkpoint', type=str, required=True, help='TensorFlow checkpoint path or folder.')
        train_parser.add_argument(
            '--pytorch_dump_output', type=str, required=True, help='Path to the PyTorch saved model output.')
        train_parser.add_argument('--config', type=str, default='', help='Configuration file path or folder.')
        train_parser.add_argument(
            '--finetuning_task_name', type=str, default=None, help='Optional fine-tuning task name if the TF model was a finetuned model.')
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args):
        '''Store the conversion parameters and set up a dedicated logger.'''
        self._logger = logging.get_logger('transformers-cli/converting')
        self._logger.info(F"""Loading model {model_type}""")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        '''Dispatch to the model-specific TF-to-PyTorch conversion script.'''
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCAmelCase : List[Any] = self._tf_checkpoint
_lowerCAmelCase : Union[str, Any] = ''
else:
_lowerCAmelCase : Optional[int] = self._tf_checkpoint
_lowerCAmelCase : Dict = ''
convert_transfo_xl_checkpoint_to_pytorch(
_A ,self._config ,self._pytorch_dump_output ,_A )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
        else:
            raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]')
"""simple docstring"""
def merge_sort(collection: list) -> list:
    """Sort a list by repeatedly moving its current min and max to the ends."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
end.reverse()
return start + collection + end
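# Illustrative trace (added for clarity, not part of the original script):
# merge_sort([3, 1, 4, 1, 5]) peels off (min, max) pairs to build start == [1, 1]
# and end == [5, 4]; reversing `end` gives [4, 5], so the result is
# [1, 1] + [3] + [4, 5] == [1, 1, 3, 4, 5].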
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """Simulate a quantum full adder and return the measurement counts."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('''inputs must be integers.''')
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''')
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''')
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''')
    # build registers
    qr = qiskit.QuantumRegister(4, '''qr''')
    cr = qiskit.ClassicalRegister(2, '''cr''')
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3], cr)  # measure the last two qubits
    backend = qiskit.Aer.get_backend('''aer_simulator''')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
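# Sanity-check sketch (reasoned from the circuit above, not from an actual run):
# for definite inputs such as quantum_full_adder(1, 0, 1), sum == 0 and carry == 1,
# so every one of the 1_000 shots should land on the classical bitstring '10'
# (read as cr[1]=carry, cr[0]=sum), i.e. counts like {'10': 1000}.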
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
def __init__( self :Dict , __magic_name__ :List[Any] , __magic_name__ :Union[str, Any]=13 , __magic_name__ :Tuple=30 , __magic_name__ :List[Any]=2 , __magic_name__ :Optional[int]=3 , __magic_name__ :List[Any]=True , __magic_name__ :Any=True , __magic_name__ :Any=32 , __magic_name__ :List[str]=2 , __magic_name__ :Tuple=4 , __magic_name__ :Optional[int]=37 , __magic_name__ :Optional[Any]="gelu" , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Optional[int]=0.1 , __magic_name__ :Union[str, Any]=10 , __magic_name__ :str=0.02 , __magic_name__ :Tuple=3 , __magic_name__ :Any=0.6 , __magic_name__ :List[Any]=None , ):
'''simple docstring'''
a = parent
a = batch_size
a = image_size
a = patch_size
a = num_channels
a = is_training
a = use_labels
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = type_sequence_label_size
a = initializer_range
a = mask_ratio
a = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
a = (image_size // patch_size) ** 2
a = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
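        # Worked example (using this tester's own defaults, for clarity): image_size=30
        # and patch_size=2 give num_patches = (30 // 2) ** 2 = 225; with mask_ratio=0.6
        # the expected sequence length is ceil(0.4 * (225 + 1)) = 91 visible tokens.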
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a = None
if self.use_labels:
a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase__ ( self :int , __magic_name__ :Dict , __magic_name__ :List[Any] , __magic_name__ :List[Any] ):
'''simple docstring'''
a = TFViTMAEModel(config=__magic_name__ )
a = model(__magic_name__ , training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self :Any , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Dict ):
'''simple docstring'''
a = TFViTMAEForPreTraining(__magic_name__ )
a = model(__magic_name__ , training=__magic_name__ )
# expected sequence length = num_patches
a = (self.image_size // self.patch_size) ** 2
a = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
a = 1
a = TFViTMAEForPreTraining(__magic_name__ )
a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a = model(__magic_name__ , training=__magic_name__ )
a = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
UpperCamelCase__ = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = TFViTMAEModelTester(self )
a = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
np.random.seed(2 )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = int((config.image_size // config.patch_size) ** 2 )
a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
a = self._prepare_for_class(__magic_name__ , __magic_name__ )
a = model(__magic_name__ , noise=__magic_name__ )
a = copy.deepcopy(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
a = model(**__magic_name__ , noise=__magic_name__ )
a = outputs_dict[0].numpy()
a = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
np.random.seed(2 )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = int((config.image_size // config.patch_size) ** 2 )
a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__magic_name__ :Optional[int] ):
a = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__magic_name__ ):
a = v.numpy()
else:
a = np.array(__magic_name__ )
return inputs_np_dict
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
a = self._prepare_for_class(__magic_name__ , __magic_name__ )
a = prepare_numpy_arrays(__magic_name__ )
a = model(__magic_name__ , noise=__magic_name__ )
a = model(**__magic_name__ , noise=__magic_name__ )
self.assert_outputs_same(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Dict , __magic_name__ :List[str] , __magic_name__ :List[str] , __magic_name__ :str ):
'''simple docstring'''
np.random.seed(2 )
a = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a = tf.constant(__magic_name__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
a = tf_noise
super().check_pt_tf_models(__magic_name__ , __magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
np.random.seed(2 )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(__magic_name__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(__magic_name__ , __magic_name__ ),)
if isinstance(__magic_name__ , __magic_name__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__magic_name__ , """_keras_serializable""" , __magic_name__ )
}
a = int((config.image_size // config.patch_size) ** 2 )
a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
a = tf.convert_to_tensor(__magic_name__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
a = main_layer_class(__magic_name__ )
a = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
a = tf.keras.Model(__magic_name__ , outputs=main_layer(__magic_name__ ) )
a = model(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
a = os.path.join(__magic_name__ , """keras_model.h5""" )
model.save(__magic_name__ )
a = tf.keras.models.load_model(
__magic_name__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(__magic_name__ , tf.keras.Model )
a = model(__magic_name__ )
self.assert_outputs_same(__magic_name__ , __magic_name__ )
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
np.random.seed(2 )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = int((config.image_size // config.patch_size) ** 2 )
a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
a = self._prepare_for_class(__magic_name__ , __magic_name__ )
a = model(__magic_name__ , noise=__magic_name__ )
if model_class.__name__ == "TFViTMAEModel":
a = outputs.last_hidden_state.numpy()
a = 0
else:
a = outputs.logits.numpy()
a = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ , saved_model=__magic_name__ )
a = model_class.from_pretrained(__magic_name__ )
a = model(__magic_name__ , noise=__magic_name__ )
if model_class.__name__ == "TFViTMAEModel":
a = after_outputs["""last_hidden_state"""].numpy()
a = 0
else:
a = after_outputs["""logits"""].numpy()
a = 0
a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1E-5 )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
np.random.seed(2 )
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = int((config.image_size // config.patch_size) ** 2 )
a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
a = model_class(__magic_name__ )
a = self._prepare_for_class(__magic_name__ , __magic_name__ )
a = model(__magic_name__ , noise=__magic_name__ )
a = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__magic_name__ )
a = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
a = model_class.from_config(model.config )
a = new_model(__magic_name__ ) # Build model
new_model.set_weights(model.get_weights() )
a = new_model(__magic_name__ , noise=__magic_name__ )
self.assert_outputs_same(__magic_name__ , __magic_name__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__magic_name__ )
def __A ( ) -> Tuple:
a = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
np.random.seed(2 )
a = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
a = self.default_image_processor
a = prepare_img()
a = image_processor(images=__magic_name__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a = ViTMAEConfig()
a = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
a = np.random.uniform(size=(1, num_patches) )
# forward pass
a = model(**__magic_name__ , noise=__magic_name__ )
# verify the logits
a = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , __magic_name__ )
a = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __magic_name__ , atol=1E-4 )
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = 'generated'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
    def _sanitize_parameters(self, return_tensors=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        '''Split the user kwargs into preprocess, forward and postprocess parameters.'''
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""")
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        '''Checks whether there might be something wrong with the given input lengths.'''
        return True
    def _parse_and_tokenize(self, *args, truncation):
        '''Prefix the inputs (if the model defines one) and tokenize them.'''
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                F' `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or of type `list`.')
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **preprocess_params):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **preprocess_params)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        '''Run generation and reshape the output ids to (batch, num_return_sequences, ...).'''
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {F'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    F'{self.return_name}_text': self.tokenizer.decode(
                        output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = 'summary'

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        '''Warn when the requested lengths look inconsistent with the input.'''
        if max_length < min_length:
            logger.warning(F'Your min_length={min_length} must be smaller than your max_length={max_length}.')
        if input_length < max_length:
            logger.warning(
                F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                'a summarization task, where outputs shorter than the input are typically wanted, you might '
                F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})')
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = 'translation'
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, """_build_translation_inputs""", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        '''Route explicit src/tgt languages, or a `translation_XX_to_YY` task name, into the preprocess parameters.'''
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params['src_lang'] = src_lang
        if tgt_lang is not None:
            preprocess_params['tgt_lang'] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('task', self.task)
            items = task.split('_')
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params['src_lang'] = items[1]
                preprocess_params['tgt_lang'] = items[3]
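                # For example, the task string "translation_en_to_fr" splits into
                # ["translation", "en", "to", "fr"], so items[1] == "en" becomes
                # src_lang and items[3] == "fr" becomes tgt_lang.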
return preprocess_params, forward_params, postprocess_params
def __call__( self :Optional[Any] , *__magic_name__ :Any , **__magic_name__ :str ):
'''simple docstring'''
return super().__call__(*__magic_name__ , **__magic_name__ )
import argparse
import os
import re
__lowerCAmelCase = """src/transformers"""
# Pattern that looks at the indentation in a line.
__lowerCAmelCase = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
__lowerCAmelCase = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__lowerCAmelCase = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
__lowerCAmelCase = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__lowerCAmelCase = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line: str) -> str:
    """Return the leading whitespace of `line` (empty string if none)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks of consecutive lines sitting at `indent_level`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap a key function so that case and underscores are ignored when sorting."""
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    """Sort object names: constants first, then classes, then functions."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
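# As an illustration (hypothetical names): sort_objects(["load_model", "DEFAULT_SIZE", "Trainer"])
# returns ["DEFAULT_SIZE", "Trainer", "load_model"]: constants, then classes, then
# functions, each group sorted case- and underscore-insensitively.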
def sort_objects_in_import(import_statement: str) -> str:
    """Sort the object names inside a single (possibly multi-line) import statement."""
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'''[{imports}]'''
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'''"{k}"''' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
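# A hedged before/after sketch (hypothetical import line):
# sort_objects_in_import('_import_structure["models"] = ["ZModel", "AConfig", "b_func"]')
# returns '_import_structure["models"] = ["AConfig", "ZModel", "b_func"]': the class
# names are sorted ahead of the function, and the bracket content is rewritten in place.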
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` blocks of one init file (optionally in place)."""
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:")
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f'''Overwriting {file}.''')
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f'''Would overwrite {len(failures)} files, run `make style`.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast() -> None:
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny() -> None:
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter() -> None:
    # laplace diagonals
    laplace_kernel = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace_kernel).astype(uint8)
    assert res.any()


def test_median_filter() -> None:
    assert med.median_filter(gray, 3).any()


def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia() -> None:
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg") -> None:
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern() -> None:
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a (_lowerCAmelCase ):
"""simple docstring"""
def __snake_case ( self : str ) -> Union[str, Any]:
__snake_case : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(lowerCamelCase , "num_encoder_blocks" ) )
class a :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[Any]=13 , lowerCamelCase : Optional[Any]=64 , lowerCamelCase : List[str]=3 , lowerCamelCase : Union[str, Any]=4 , lowerCamelCase : Optional[int]=[2, 2, 2, 2] , lowerCamelCase : List[str]=[8, 4, 2, 1] , lowerCamelCase : int=[16, 32, 64, 128] , lowerCamelCase : int=[1, 4, 8, 16] , lowerCamelCase : int=[1, 2, 4, 8] , lowerCamelCase : str=True , lowerCamelCase : List[Any]=True , lowerCamelCase : int="gelu" , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Tuple=0.02 , lowerCamelCase : List[str]=3 , lowerCamelCase : List[Any]=None , ) -> Dict:
__snake_case : Dict = parent
__snake_case : int = batch_size
__snake_case : int = image_size
__snake_case : Optional[Any] = num_channels
__snake_case : Tuple = num_encoder_blocks
__snake_case : int = sr_ratios
__snake_case : int = depths
__snake_case : Optional[Any] = hidden_sizes
__snake_case : Optional[int] = downsampling_rates
__snake_case : Optional[int] = num_attention_heads
__snake_case : Tuple = is_training
__snake_case : Optional[Any] = use_labels
__snake_case : Optional[int] = hidden_act
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : List[Any] = initializer_range
__snake_case : int = num_labels
__snake_case : List[str] = scope
def __snake_case ( self : Dict ) -> Any:
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Dict = None
if self.use_labels:
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : Optional[int] ) -> Optional[Any]:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __snake_case ( self : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ) -> List[str]:
__snake_case : Dict = SegformerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[Any] = model(lowerCamelCase )
__snake_case : Any = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : int ) -> str:
__snake_case : Any = self.num_labels
__snake_case : List[str] = SegformerForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : List[str] = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__snake_case : Optional[int] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def __snake_case ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Any ) -> List[str]:
__snake_case : Any = 1
__snake_case : Any = SegformerForSemanticSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Tuple = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase )
__snake_case : Any = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Tuple = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Any = False
def __snake_case ( self : str ) -> Dict:
__snake_case : Dict = SegformerModelTester(self )
__snake_case : Tuple = SegformerConfigTester(self , config_class=lowerCamelCase )
def __snake_case ( self : str ) -> int:
self.config_tester.run_common_tests()
def __snake_case ( self : int ) -> Any:
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __snake_case ( self : Dict ) -> Any:
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase )
def __snake_case ( self : Tuple ) -> List[str]:
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase )
@unittest.skip("SegFormer does not use inputs_embeds" )
def __snake_case ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def __snake_case ( self : int ) -> Optional[int]:
pass
def __snake_case ( self : Optional[int] ) -> Optional[int]:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(lowerCamelCase )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Optional[Any] = [*signature.parameters.keys()]
__snake_case : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> str:
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] = True
for model_class in self.all_model_classes:
__snake_case : Dict = True
__snake_case : str = False
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : List[Any] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : str = outputs.attentions
__snake_case : str = sum(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : int = True
__snake_case : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Dict = outputs.attentions
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# verify the first attentions (first block, first layer)
__snake_case : Optional[Any] = (self.model_tester.image_size // 4) ** 2
__snake_case : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
__snake_case : List[Any] = (self.model_tester.image_size // 32) ** 2
__snake_case : Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
__snake_case : Dict = len(lowerCamelCase )
# Check attention is always last and order is fine
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = True
__snake_case : int = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(lowerCamelCase ) )
__snake_case : Optional[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# verify the first attentions (first block, first layer)
__snake_case : Any = (self.model_tester.image_size // 4) ** 2
__snake_case : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __snake_case ( self : Tuple ) -> Union[str, Any]:
def check_hidden_states_output(lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : str ):
__snake_case : Tuple = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__snake_case : int = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__snake_case : Tuple = outputs.hidden_states
__snake_case : int = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : int = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __snake_case ( self : List[str] ) -> List[str]:
if not self.model_tester.is_training:
return
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : int = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ):
continue
__snake_case : Dict = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
__snake_case : int = self._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
__snake_case : int = model(**lowerCamelCase ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case ( self : Tuple ) -> Optional[int]:
pass
@slow
def __snake_case ( self : int ) -> Dict:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = SegformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class a (unittest.TestCase ):
"""simple docstring"""
@slow
def __snake_case ( self : Optional[int] ) -> Union[str, Any]:
# only resize + normalize
__snake_case : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
__snake_case : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
lowerCamelCase )
__snake_case : Any = prepare_img()
__snake_case : Optional[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" )
__snake_case : Optional[int] = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__snake_case : List[Any] = model(lowerCamelCase )
__snake_case : Dict = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : Dict = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
def __snake_case ( self : List[Any] ) -> Tuple:
# only resize + normalize
__snake_case : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
__snake_case : List[str] = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(lowerCamelCase )
__snake_case : int = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" )
__snake_case : str = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__snake_case : List[str] = model(lowerCamelCase )
__snake_case : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__snake_case : Tuple = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-1 ) )
@slow
def __snake_case ( self : str ) -> List[str]:
# only resize + normalize
__snake_case : Optional[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
__snake_case : Dict = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
lowerCamelCase )
__snake_case : Any = prepare_img()
__snake_case : List[Any] = image_processor(images=lowerCamelCase , return_tensors="pt" )
__snake_case : Optional[Any] = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__snake_case : int = model(lowerCamelCase )
__snake_case : List[Any] = outputs.logits.detach().cpu()
__snake_case : int = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(500, 300)] )
__snake_case : Any = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__snake_case : int = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__snake_case : Optional[Any] = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
| 81
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
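    # Note: inputs_to_logits_ratio multiplies the conv strides together, i.e. the
    # total downsampling factor of the feature extractor. With the default strides
    # (5, 2, 2, 2, 2, 2, 2) it equals 5 * 2**6 == 320, so each encoder output frame
    # covers 320 input audio samples.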
| 213
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        # Coefficients are stored from the constant term upward: c0 + c1*x + c2*x^2 + ...
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        # Add coefficient-wise, starting from a copy of the higher-degree polynomial.
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        # Polynomial product: convolution of the two coefficient sequences.
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
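# Usage sketch (added for illustration; not part of the original module):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1, constant term first
    q = Polynomial(1, [0, 1])     # x
    print(p + q)                  # 3x^2 + 3x + 1
    print(p * q)                  # 3x^3 + 2x^2 + 1x
    print(p.evaluate(2))          # 17
    print(p.derivative())         # 6x + 2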
| 720
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowerCAmelCase__ ( a_ : bytes , a_ : int ) -> np.array:
UpperCAmelCase__ : Union[str, Any] = f"""{sampling_rate}"""
UpperCAmelCase__ : List[Any] = '''1'''
UpperCAmelCase__ : int = '''f32le'''
UpperCAmelCase__ : Tuple = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(a_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase__ : Dict = ffmpeg_process.communicate(a_ )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
UpperCAmelCase__ : Dict = output_stream[0]
UpperCAmelCase__ : int = np.frombuffer(a_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
def lowerCAmelCase__ ( a_ : int , a_ : float , a_ : str = "f32le" , ) -> List[str]:
UpperCAmelCase__ : str = f"""{sampling_rate}"""
UpperCAmelCase__ : Tuple = '''1'''
if format_for_conversion == "s16le":
UpperCAmelCase__ : str = 2
elif format_for_conversion == "f32le":
UpperCAmelCase__ : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
UpperCAmelCase__ : Dict = platform.system()
if system == "Linux":
UpperCAmelCase__ : Union[str, Any] = '''alsa'''
UpperCAmelCase__ : List[Any] = '''default'''
elif system == "Darwin":
UpperCAmelCase__ : List[str] = '''avfoundation'''
UpperCAmelCase__ : List[Any] = ''':0'''
elif system == "Windows":
UpperCAmelCase__ : Optional[int] = '''dshow'''
UpperCAmelCase__ : Any = '''default'''
UpperCAmelCase__ : str = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
UpperCAmelCase__ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase__ : Dict = _ffmpeg_stream(a_ , a_ )
for item in iterator:
yield item
def lowerCAmelCase__ ( a_ : int , a_ : float , a_ : Optional[int] = None , a_ : Optional[Union[Tuple[float, float], float]] = None , a_ : str = "f32le" , ) -> Any:
if stream_chunk_s is not None:
UpperCAmelCase__ : int = stream_chunk_s
else:
UpperCAmelCase__ : str = chunk_length_s
UpperCAmelCase__ : Any = ffmpeg_microphone(a_ , a_ , format_for_conversion=a_ )
if format_for_conversion == "s16le":
UpperCAmelCase__ : Dict = np.intaa
UpperCAmelCase__ : List[Any] = 2
elif format_for_conversion == "f32le":
UpperCAmelCase__ : Tuple = np.floataa
UpperCAmelCase__ : List[str] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
UpperCAmelCase__ : Any = chunk_length_s / 6
UpperCAmelCase__ : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a_ , (int, float) ):
UpperCAmelCase__ : int = [stride_length_s, stride_length_s]
UpperCAmelCase__ : List[str] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase__ : Dict = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase__ : Optional[int] = datetime.datetime.now()
UpperCAmelCase__ : Dict = datetime.timedelta(seconds=a_ )
for item in chunk_bytes_iter(a_ , a_ , stride=(stride_left, stride_right) , stream=a_ ):
# Put everything back in numpy scale
UpperCAmelCase__ : str = np.frombuffer(item['''raw'''] , dtype=a_ )
UpperCAmelCase__ : Any = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
UpperCAmelCase__ : List[Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def lowerCAmelCase__ ( a_ : str , a_ : int , a_ : Tuple[int, int] , a_ : bool = False ) -> Any:
UpperCAmelCase__ : Union[str, Any] = B''''''
UpperCAmelCase__ , UpperCAmelCase__ : int = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
UpperCAmelCase__ : List[Any] = 0
for raw in iterator:
acc += raw
if stream and len(a_ ) < chunk_len:
UpperCAmelCase__ : str = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a_ ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase__ : Union[str, Any] = (_stride_left, stride_right)
UpperCAmelCase__ : int = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
UpperCAmelCase__ : List[Any] = False
yield item
UpperCAmelCase__ : Optional[int] = stride_left
UpperCAmelCase__ : Dict = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a_ ) > stride_left:
UpperCAmelCase__ : List[str] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
UpperCAmelCase__ : Optional[Any] = False
yield item
def lowerCAmelCase__ ( a_ : str , a_ : int ) -> Any:
UpperCAmelCase__ : str = 2**2_4 # 16Mo
try:
with subprocess.Popen(a_ , stdout=subprocess.PIPE , bufsize=a_ ) as ffmpeg_process:
while True:
UpperCAmelCase__ : Tuple = ffmpeg_process.stdout.read(a_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
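# Toy sketch (added for clarity; standalone because the helper definitions above carry
# machine-mangled names in this dump). It restates the chunker's overlap bookkeeping:
# every yielded window carries a (stride_left, stride_right) pair so the consumer can
# drop the overlap bytes after inference.
def _toy_chunk_bytes_iter(data: bytes, chunk_len: int, stride: tuple):
    stride_left, stride_right = stride
    acc = data
    cur_left = 0
    while len(acc) >= chunk_len:
        yield {"raw": acc[:chunk_len], "stride": (cur_left, stride_right)}
        cur_left = stride_left
        acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:
        yield {"raw": acc, "stride": (cur_left, 0)}


# list(_toy_chunk_bytes_iter(b"abcdef", 4, (1, 1)))
# -> [{'raw': b'abcd', 'stride': (0, 1)},
#     {'raw': b'cdef', 'stride': (1, 1)},
#     {'raw': b'ef',   'stride': (1, 0)}]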
| 599
| 0
|
from string import ascii_uppercase

dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Cycle the key's own letters until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt: each letter m becomes (m - k) mod 26; spaces pass through."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt: invert the mapping with (c + k) mod 26; spaces pass through."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
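if __name__ == "__main__":
    # Worked example (added for illustration; not part of the original module):
    rk = generate_key("THE GERMAN ATTACK", "SECRET")
    assert rk.startswith("SECRETSECRET")  # the key pads itself cyclically to length 17
    enc = cipher_text("THE GERMAN ATTACK", rk)
    assert original_text(enc, rk) == "THE GERMAN ATTACK"  # (m - k) then (c + k) round-trips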
| 99
|
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : int = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class a__( snake_case__ , unittest.TestCase ):
a_ : List[Any] = GPTSwaTokenizer
a_ : int = False
a_ : List[Any] = True
a_ : Any = False
def _lowercase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ =GPTSwaTokenizer(_UpperCAmelCase , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self , _UpperCAmelCase ) -> Any:
snake_case__ ='This is a test'
snake_case__ ='This is a test'
return input_text, output_text
def _lowercase ( self ) -> str:
snake_case__ ='<s>'
snake_case__ =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_UpperCAmelCase ) , 2000 )
def _lowercase ( self ) -> List[str]:
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def _lowercase ( self ) -> Any:
snake_case__ =GPTSwaTokenizer(_UpperCAmelCase )
snake_case__ =tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [465, 287, 265, 631, 842] )
snake_case__ =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
_UpperCAmelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
snake_case__ =tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
snake_case__ =tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
# fmt: off
self.assertListEqual(
_UpperCAmelCase , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def _lowercase ( self ) -> Optional[Any]:
snake_case__ =GPTSwaTokenizer(_UpperCAmelCase )
snake_case__ =['This is a test', 'I was born in 92000, and this is falsé.']
snake_case__ =[
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertListEqual(tokenizer.encode_fast(_UpperCAmelCase ) , _UpperCAmelCase )
# Test that decode_fast returns the input text
for text, token_ids in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(tokenizer.decode_fast(_UpperCAmelCase ) , _UpperCAmelCase )
@slow
def _lowercase ( self ) -> Dict:
snake_case__ =[
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
snake_case__ ={'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='AI-Sweden/gpt-sw3-126m' , sequences=_UpperCAmelCase , )
| 538
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = "deta"
a_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=9_00 , lowerCAmelCase_=20_48 , lowerCAmelCase_=6 , lowerCAmelCase_=20_48 , lowerCAmelCase_=8 , lowerCAmelCase_=6 , lowerCAmelCase_=10_24 , lowerCAmelCase_=8 , lowerCAmelCase_=0.0 , lowerCAmelCase_=True , lowerCAmelCase_="relu" , lowerCAmelCase_=2_56 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1.0 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="sine" , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_=True , lowerCAmelCase_=3_00 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=1 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.25 , **lowerCAmelCase_ , ):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
a_ : Tuple = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
a_ : Any = backbone_config.pop("""model_type""" )
a_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
a_ : Tuple = config_class.from_dict(lowerCAmelCase_ )
a_ : Any = backbone_config
a_ : List[Any] = num_queries
a_ : List[str] = max_position_embeddings
a_ : str = d_model
a_ : int = encoder_ffn_dim
a_ : Union[str, Any] = encoder_layers
a_ : Union[str, Any] = encoder_attention_heads
a_ : Optional[int] = decoder_ffn_dim
a_ : int = decoder_layers
a_ : str = decoder_attention_heads
a_ : str = dropout
a_ : Optional[Any] = attention_dropout
a_ : Dict = activation_dropout
a_ : Dict = activation_function
a_ : Any = init_std
a_ : Optional[int] = init_xavier_std
a_ : Tuple = encoder_layerdrop
a_ : List[str] = auxiliary_loss
a_ : Dict = position_embedding_type
# deformable attributes
a_ : Tuple = num_feature_levels
a_ : Optional[int] = encoder_n_points
a_ : List[Any] = decoder_n_points
a_ : Optional[Any] = two_stage
a_ : Optional[int] = two_stage_num_proposals
a_ : Optional[Any] = with_box_refine
a_ : int = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
a_ : str = class_cost
a_ : List[Any] = bbox_cost
a_ : Dict = giou_cost
# Loss coefficients
a_ : Optional[Any] = mask_loss_coefficient
a_ : int = dice_loss_coefficient
a_ : int = bbox_loss_coefficient
a_ : Any = giou_loss_coefficient
a_ : Tuple = eos_coefficient
a_ : Optional[int] = focal_alpha
super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
return self.d_model
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[str] = copy.deepcopy(self.__dict__ )
a_ : int = self.backbone_config.to_dict()
a_ : Dict = self.__class__.model_type
return output
| 460
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case: Dict = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case: List[str] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case: Any = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case: List[str] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__snake_case: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
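# Note: the try/except blocks above implement transformers' optional-dependency
# pattern -- each backend-specific symbol list is registered only when its backend
# imports cleanly, and _LazyModule then defers the real submodule imports until the
# first attribute access. A minimal sketch of that idea (hypothetical helper, not
# the real _LazyModule):
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    """Resolve attributes by importing the owning submodule on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)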
| 460
| 1
|
__snake_case : List[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__snake_case : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__snake_case : str = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 540
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCAmelCase ( a__ , a__ , a__ , a__ , a__):
'''simple docstring'''
with open(a__) as metadata_file:
a_ : Any = json.load(a__)
a_ : Dict = LukeConfig(use_entity_aware_attention=a__ , **metadata["""model_config"""])
# Load in the weights from the checkpoint_path
a_ : str = torch.load(a__ , map_location="""cpu""")
# Load the entity vocab file
a_ : List[str] = load_entity_vocab(a__)
a_ : int = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""])
# Add special tokens to the token vocabulary for downstream tasks
a_ : Optional[Any] = AddedToken("""<ent>""" , lstrip=a__ , rstrip=a__)
a_ : int = AddedToken("""<ent2>""" , lstrip=a__ , rstrip=a__)
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]})
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''')
tokenizer.save_pretrained(a__)
with open(os.path.join(a__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""]) , """w""") as f:
json.dump(a__ , a__)
a_ : List[str] = LukeTokenizer.from_pretrained(a__)
# Initialize the embeddings of the special tokens
a_ : Optional[int] = state_dict["""embeddings.word_embeddings.weight"""]
a_ : List[str] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""])[0]].unsqueeze(0)
a_ : List[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""])[0]].unsqueeze(0)
a_ : Optional[Any] = torch.cat([word_emb, ent_emb, enta_emb])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
a_ : Any = f'''encoder.layer.{layer_index}.attention.self.'''
a_ : List[str] = state_dict[prefix + matrix_name]
a_ : List[Any] = state_dict[prefix + matrix_name]
a_ : Dict = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ : str = state_dict["""entity_embeddings.entity_embeddings.weight"""]
a_ : int = entity_emb[entity_vocab["""[MASK]"""]]
a_ : int = LukeModel(config=a__).eval()
a_ , a_ : Optional[int] = model.load_state_dict(a__ , strict=a__)
if not (len(a__) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {", ".join(a__)}. Expected only missing embeddings.position_ids''')
if not (all(key.startswith("""entity_predictions""") or key.startswith("""lm_head""") for key in unexpected_keys)):
raise ValueError(
"""Unexpected keys"""
f''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions") or key.startswith("lm_head"))])}''')
# Check outputs
a_ : List[Any] = LukeTokenizer.from_pretrained(a__ , task="""entity_classification""")
a_ : str = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
a_ : str = (3_9, 4_2)
a_ : Tuple = tokenizer(a__ , entity_spans=[span] , add_prefix_space=a__ , return_tensors="""pt""")
a_ : List[str] = model(**a__)
# Verify word hidden states
if model_size == "large":
a_ : Optional[int] = torch.Size((1, 4_2, 1_0_2_4))
a_ : List[str] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
else: # base
a_ : List[str] = torch.Size((1, 4_2, 7_6_8))
a_ : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''')
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
a_ : Dict = torch.Size((1, 1, 1_0_2_4))
a_ : int = torch.tensor([[0.0466, -0.0106, -0.0179]])
else: # base
a_ : Optional[Any] = torch.Size((1, 1, 7_6_8))
a_ : str = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''')
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , a__ , atol=1e-4):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(a__))
model.save_pretrained(a__)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
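# Illustrative note (hypothetical file contents): the entity vocab file is a TSV with
# one "<entity title>\t<count>" pair per line, and only the line index is kept. A file
# whose first three lines name "[PAD]", "[UNK]" and "[MASK]" would therefore yield
# {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2}.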
if __name__ == "__main__":
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
__snake_case : List[str] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 540
| 1
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ : Tuple = get_tests_dir('fixtures/test_sentencepiece.model')
lowerCamelCase_ : Dict = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowerCamelCase_ : Tuple = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ : Tuple = CamembertTokenizer
lowercase_ : Any = CamembertTokenizerFast
lowercase_ : Any = True
lowercase_ : Optional[Any] = True
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Optional[int] = CamembertTokenizer(snake_case_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : str = '<pad>'
A_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case_ ) , 1_0_0_4 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = CamembertTokenizer(snake_case_ )
tokenizer.save_pretrained(self.tmpdirname )
A_ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
A_ : Tuple = 'I was born in 92000, and this is falsé.'
A_ : Tuple = tokenizer.encode(snake_case_ )
A_ : Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
A_ : Optional[int] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
A_ : Union[str, Any] = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
        # <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
A_ : int = tokenizer.convert_ids_to_tokens(snake_case_ )
A_ : int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
A_ : Dict = self.get_tokenizer()
A_ : List[Any] = self.get_rust_tokenizer()
A_ : int = 'I was born in 92000, and this is falsé.'
A_ : Any = tokenizer.tokenize(snake_case_ )
A_ : Optional[int] = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
A_ : List[str] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
A_ : List[str] = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
A_ : str = self.get_rust_tokenizer()
A_ : int = tokenizer.encode(snake_case_ )
A_ : Optional[Any] = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = {'input_ids': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
A_ : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=snake_case_ , )
| 714
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        # 0-1 BFS: relax 0-weight edges at the front of the deque and 1-weight
        # edges at the back, keeping the deque ordered by distance (O(V + E)).
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
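if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module):
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)  # zero-weight edges are explored first (appendleft)
    g.add_edge(0, 2, 1)
    g.add_edge(1, 2, 0)
    assert g.get_shortest_path(0, 2) == 0  # two free hops beat the direct weight-1 edge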
| 302
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase_ ( ) -> int:
a__ : Any = HfArgumentParser(__a )
a__ : Any = parser.parse_args_into_dataclasses()[0]
a__ : Optional[int] = TensorFlowBenchmark(args=__a )
try:
a__ : Optional[int] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a__ : Tuple = "Arg --no_{0} is no longer used, please use --no-{0} instead."
a__ : List[Any] = " ".join(str(__a ).split(" " )[:-1] )
a__ : str = ""
a__ : List[Any] = eval(str(__a ).split(" " )[-1] )
a__ : List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__a )
if len(__a ) > 0:
a__ : Tuple = full_error_msg + begin_error_msg + str(__a )
raise ValueError(__a )
benchmark.run()
if __name__ == "__main__":
main()
| 37
| 0
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
__a : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__a : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
for i in range(config.num_hidden_layers ):
if base_model:
__a : int = ''
else:
__a : List[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
__a : str = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__a : int = in_proj_weight[
: config.hidden_size, :
]
__a : Optional[Any] = in_proj_bias[: config.hidden_size]
__a : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
__a : Union[str, Any] = in_proj_bias[-config.hidden_size :]
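# Added shape note: timm fuses q, k and v into a single projection of shape
# (3 * hidden_size, hidden_size); read_in_q_k_v above slices it into three equal
# (hidden_size, hidden_size) blocks in q, k, v order. A standalone sketch with a
# hypothetical hidden size:
def _qkv_split_demo(hidden: int = 4) -> None:
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
    assert q.shape == k.shape == v.shape == (hidden, hidden)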
def remove_classification_head_(state_dict):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple ):
__a : Dict = ViTConfig()
__a : Optional[Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__a : Any = True
__a : Optional[int] = int(vit_name[-1_2:-1_0] )
__a : List[Any] = int(vit_name[-9:-6] )
else:
__a : Optional[int] = 1_0_0_0
__a : Any = 'huggingface/label-files'
__a : int = 'imagenet-1k-id2label.json'
__a : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
__a : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
__a : Union[str, Any] = idalabel
__a : Dict = {v: k for k, v in idalabel.items()}
__a : Optional[int] = int(vit_name[-6:-4] )
__a : int = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('tiny' ):
__a : List[str] = 1_9_2
__a : Union[str, Any] = 7_6_8
__a : List[str] = 1_2
__a : Optional[Any] = 3
elif vit_name[9:].startswith('small' ):
__a : Optional[int] = 3_8_4
__a : Optional[Any] = 1_5_3_6
__a : str = 1_2
__a : List[Any] = 6
else:
pass
else:
if vit_name[4:].startswith('small' ):
__a : Any = 7_6_8
__a : Any = 2_3_0_4
__a : Optional[Any] = 8
__a : Optional[Any] = 8
elif vit_name[4:].startswith('base' ):
pass
elif vit_name[4:].startswith('large' ):
__a : Optional[Any] = 1_0_2_4
__a : Optional[Any] = 4_0_9_6
__a : List[str] = 2_4
__a : Dict = 1_6
elif vit_name[4:].startswith('huge' ):
__a : Tuple = 1_2_8_0
__a : str = 5_1_2_0
__a : Union[str, Any] = 3_2
__a : Dict = 1_6
# load original model from timm
__a : Optional[Any] = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__a : Any = timm_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase_ )
__a : List[Any] = create_rename_keys(lowerCamelCase_ , lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__a : Dict = ViTModel(lowerCamelCase_ ).eval()
else:
__a : str = ViTForImageClassification(lowerCamelCase_ ).eval()
model.load_state_dict(lowerCamelCase_ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__a : List[str] = DeiTImageProcessor(size=config.image_size )
else:
__a : int = ViTImageProcessor(size=config.image_size )
__a : Any = image_processor(images=prepare_img() , return_tensors='pt' )
__a : Optional[Any] = encoding['pixel_values']
__a : int = model(lowerCamelCase_ )
if base_model:
__a : Union[str, Any] = timm_model.forward_features(lowerCamelCase_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowerCamelCase_ , outputs.pooler_output , atol=1e-3 )
else:
__a : Tuple = timm_model(lowerCamelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase_ , outputs.logits , atol=1e-3 )
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 577
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Compute, for each query token, the score of being the start and the end
        of an entity, given the encoded supports.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
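# A hedged usage sketch for FSNERModel. The checkpoint name is the project's
# published default; everything else is illustrative. In the fsner project the
# query/support batches come from a tokenizer wrapper that also adds the
# "sizes", "start_token_id" and "end_token_id" entries consumed by forward().
#
#     model = FSNERModel()
#     # W_query:    tokenized queries (dict of tensors)
#     # W_supports: tokenized support examples with entity markers
#     # p_starts, p_ends = model(W_query, W_supports)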
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
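# A minimal sketch (not part of the module above) of how the `_LazyModule`
# pattern behaves from the caller's side: importing the package is cheap, and
# the heavy submodule is only loaded when an attribute is first accessed. The
# import path assumes this file lives at `transformers/models/layoutlmv3/__init__.py`,
# as in the upstream repo.
#
#     from transformers.models import layoutlmv3
#     config_cls = layoutlmv3.LayoutLMv3Config  # triggers the real import here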
def topological_sort(graph):
    """Perform a topological sort (Kahn's algorithm) on a directed acyclic graph."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
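# A quick trace for the sample graph above, easy to verify by hand: vertex 0
# starts with indegree 0 and is dequeued first; removing it frees 1 and 2,
# removing those frees 3, and removing 3 frees 4 and 5. The call therefore
# prints [0, 1, 2, 3, 4, 5].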
from __future__ import annotations
def all_unique(collection: list) -> bool:
    """Return True if every element of the collection is distinct."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
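# A hedged usage sketch for `all_unique` (values chosen for illustration):
#
#     all_unique([1, 2, 3])  # -> True
#     all_unique([1, 2, 2])  # -> False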
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n by trial division, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
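# A hedged usage sketch for `prime_factors` (the value is illustrative):
#
#     prime_factors(360)  # -> [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5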
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
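# A minimal usage sketch for the config above (values are illustrative):
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     print(config.context_length)  # falls back to prediction_length -> 24
#     print(config.feature_size)    # input_size * len(lags_sequence) + extra features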
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of an edge label to the child node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words in the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: The word equals the node prefix
        # Solution: We mark the current node as a leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree; return True if it was removed."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
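# A hedged sketch of the canonical entry point exposed above: wrap model,
# optimizer and dataloader with `Accelerator` so the same training loop runs
# on CPU, a single GPU, or a distributed setup. `model`, `optimizer` and
# `dataloader` are placeholders the caller must define.
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()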
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(answer[:2][::-1])
        assert edge in result or reverse in result
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
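# A hedged usage sketch ("gpt2" is the standard hub checkpoint id; the text is
# illustrative):
#
#     tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#     ids = tokenizer("Hello world")["input_ids"]
#     print(tokenizer.decode(ids))  # -> "Hello world"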
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # timm stores query/key/value as one fused matrix; split it into
            # the three separate projections that the HF model expects.
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
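# A hedged example invocation (the default checkpoint URL above points at the
# published MAE base weights; the script file name and output path are
# illustrative):
#
#     python convert_vit_mae_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base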
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Safety checker that arguments are correct."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """Returns True if the model is quantizable, False otherwise."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Returns the quantization method used in the model."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
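# A hedged usage sketch (mirrors the documented transformers API for 4-bit
# loading; the model id is a placeholder):
#
#     from transformers import AutoModelForCausalLM
#
#     bnb_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype="bfloat16",
#     )
#     model = AutoModelForCausalLM.from_pretrained("some/model-id", quantization_config=bnb_config)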
def solution(n: int = 1000) -> int:
    """Count how many of the first `n` expansions of the continued fraction for
    sqrt(2) have a numerator with more digits than the denominator (Project Euler 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(f"""{solution() = }""")
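# For orientation, a worked example of the recurrence: the expansions are
# 3/2, 7/5, 17/12, 41/29, ... and the eighth, 1393/985, is the first whose
# numerator has more digits than its denominator, so solution(8) == 1.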
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
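# A hedged usage sketch: `column_mapping` tells the datasets task machinery how
# to rename a dataset's columns to the schema above (the column names passed in
# are illustrative):
#
#     template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
#     print(template.column_mapping)
#     # {'query': 'question', 'passage': 'context', 'answers': 'answers'}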
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"""MRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MraForMaskedLM""",
"""MraForMultipleChoice""",
"""MraForQuestionAnswering""",
"""MraForSequenceClassification""",
"""MraForTokenClassification""",
"""MraLayer""",
"""MraModel""",
"""MraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
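# A worked example (values easy to verify by hand): encode("hello") yields
# [8, 5, 12, 12, 15] since h is the 8th letter and e the 5th, and
# decode([8, 5, 12, 12, 15]) maps it back to "hello".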
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        batched_outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(batched_outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
                    {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
                    {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                    {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
                    {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
                ]
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
            ],
        )

        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
                [
                    {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                    {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                    {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
                    {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
                    {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
                ],
            ],
        )

    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
                {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
            ],
        )

    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
                {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
            ],
        )
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
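
    # Worked example (hypothetical numbers): with dataset_size=20_000, train_batch_size=32,
    # accumulate_grad_batches=2 and gpus=1, the effective batch size is 64, so one epoch is
    # 312.5 optimizer steps and max_epochs=3 gives total_steps() == 937.5. Note the result
    # is a float; the schedulers accept it, but an int(...) cast would be stricter.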
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators were removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # Print every parameter of model.rag whose gradient is still None after the backward pass.
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
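

# A minimal usage sketch (hypothetical `MyTaskModule` subclass of BaseTransformer;
# the actual scripts in this project wire this up with task-specific arguments):
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     BaseTransformer.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModule(args)
#     trainer = generic_train(model, args)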
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
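
    # For example, get_scheduler_config(beta_schedule="squaredcos_cap_v2") returns the
    # defaults with only the beta schedule overridden; the check_over_configs helper in
    # the common test base relies on this to vary one field at a time.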
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
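
    # The expected values above follow from DDPM's "fixed_small" posterior variance,
    # beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), under the linear beta schedule:
    # it is ~0 at t=0 (alpha_bar_{-1} = 1) and approaches beta_end = 0.02 at t=999.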
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        # Each timestep's predecessor is the next entry in the custom list; the final
        # timestep maps to -1.
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
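

# These tests belong to diffusers' scheduler test suite; a typical invocation
# (assuming a diffusers checkout) would be something like:
#     pytest tests/schedulers/test_scheduler_ddpm.py -k "custom_timesteps"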
"""ChrF(++) metric, computed via sacrebleu."""

import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF

import datasets


_CITATION = """\
@inproceedings{popovic-2015-chrf,
    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W15-3049",
    doi = "10.18653/v1/W15-3049",
    pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
    title = "chr{F}++: words helping character n-grams",
    author = "Popovi{\'c}, Maja",
    booktitle = "Proceedings of the Second Conference on Machine Translation",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W17-4770",
    doi = "10.18653/v1/W17-4770",
    pages = "612--618",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""

_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.

Args:
    predictions (list of str): The predicted sentences.
    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
    char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
    lowercase (bool): If `True`, enables case-insensitivity. Defaults to `False`.
    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
    eps_smoothing (bool): If `True`, applies epsilon smoothing similar
    to reference chrF++.py, NLTK and Moses implementations. If `False`,
    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.

Returns:
    'score' (float): The chrF (chrF++) score,
    'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
    'beta' (int): Determine the importance of recall w.r.t precision

Examples:
    Example 1--a simple example of calculating chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction, references=reference)
        >>> print(results)
        {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}

    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2)
        >>> print(results)
        {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}

    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
        >>> chrf = datasets.load_metric("chrf")
        >>> results = chrf.compute(predictions=prediction,
        ...                        references=reference,
        ...                        word_order=2,
        ...                        lowercase=True)
        >>> print(results)
        {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        # Transpose references from [prediction][ref_index] to sacrebleu's
        # expected [ref_index][prediction] layout.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
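

# A quick sanity check (using the same `datasets` metric-loading API as the
# docstring examples above):
#
#     chrf = datasets.load_metric("chrf")
#     result = chrf.compute(predictions=["hello there"], references=[["hello there"]])
#     # a perfect match yields result["score"] == 100.0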